ll_bfs_template.h | /*
* ll_bfs_template.h
* LLAMA Graph Analytics
*
* Copyright 2014
* The President and Fellows of Harvard College.
*
* Copyright 2014
* Oracle Labs.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This file was adapted from Green-Marl, which includes the following notice:
*
* Copyright (c) 2011-2012 Stanford University, unless otherwise specified.
* All rights reserved.
*
* This software was developed by the Pervasive Parallelism Laboratory of
* Stanford University, California, USA.
*
* Permission to use, copy, modify, and distribute this software in source or
* binary form for any purpose with or without fee is hereby granted, provided
* that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Stanford University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef LL_BFS_TEMPLATE_H
#define LL_BFS_TEMPLATE_H
#include <omp.h>
#include <string.h>
#include <map>
#include <set>
#include <unordered_set>
#include <unordered_map>
#include <vector>
#include <algorithm> // std::min
#include <assert.h>
template<class Graph, typename level_t, bool use_multithread, bool has_navigator,
bool use_reverse_edge, bool save_child>
class ll_bfs_template
{
public:
ll_bfs_template(Graph& _G) :
G(_G) {
visited_bitmap = NULL; // bitmap
visited_level = NULL;
thread_local_next_level = NULL;
down_edge_array = NULL;
down_edge_set = NULL;
down_edge_array_w = NULL;
if (save_child) {
down_edge_set = new std::unordered_set<edge_t>();
}
}
virtual ~ll_bfs_template() {
delete [] visited_bitmap;
delete [] visited_level;
delete [] thread_local_next_level;
delete down_edge_set;
if (down_edge_array != NULL) {
#ifndef FORCE_L0
for (size_t i = 0; i < G.num_levels(); i++) delete[] down_edge_array[i];
#endif
delete[] down_edge_array;
}
}
void prepare(node_t root_node) {
// TODO Is this correct? Do we need to poll some sort of runtime?
prepare(root_node, omp_get_max_threads());
}
void prepare(node_t root_node, int max_num_thread) {
int num_thread;
if (use_multithread) {
num_thread = max_num_thread;
} else {
num_thread = 1;
}
max_threads = num_thread;
is_finished = false;
curr_level = 0;
root = root_node;
state = ST_SMALL;
assert(root != LL_NIL_NODE);
if (save_child) {
if (down_edge_set == NULL)
down_edge_set = new std::unordered_set<edge_t>();
}
global_vector.clear();
level_queue_begin.clear();
level_count.clear();
// create local queues
if (thread_local_next_level == NULL) {
thread_local_next_level = new std::vector<node_t>[num_thread];
for (int i = 0; i < num_thread; i++)
thread_local_next_level[i].reserve(THRESHOLD2);
} else {
for (int i = 0; i < num_thread; i++)
thread_local_next_level[i].clear();
}
}
void do_bfs_forward() {
//---------------------------------
// prepare root node
//---------------------------------
curr_level = 0;
curr_count = 0;
next_count = 0;
small_visited[root] = curr_level;
curr_count++;
global_vector.push_back(root);
global_curr_level_begin = 0;
global_next_level_begin = curr_count;
level_count.push_back(curr_count);
level_queue_begin.push_back(global_curr_level_begin);
bool is_done = false;
while (!is_done) {
switch (state) {
case ST_SMALL: {
for (node_t i = 0; i < curr_count; i++) {
node_t t = global_vector[global_curr_level_begin + i];
iterate_neighbor_small(t);
visit_fw(t); // visit after iterating neighbors, so the visitor can already query t's down-edges
}
break;
}
case ST_QUE: {
if (use_multithread) // do it in parallel
{
int num_threads = std::min((node_t) max_threads, curr_count/128+1);
#pragma omp parallel num_threads(num_threads)
{
int tid = omp_get_thread_num();
#pragma omp for nowait
for (node_t i = 0; i < curr_count; i++) {
node_t t = global_vector[global_curr_level_begin + i];
iterate_neighbor_que(t, tid);
visit_fw(t);
}
finish_thread_que(tid);
}
}
else { // do it sequentially
int tid = 0;
for (node_t i = 0; i < curr_count; i++) {
//node_t t = global_curr_level[i];
node_t t = global_vector[global_curr_level_begin + i];
iterate_neighbor_que(t, tid);
visit_fw(t);
}
finish_thread_que(tid);
}
break;
}
case ST_Q2R: {
if (use_multithread) { // do it in parallel
int num_threads = std::min((node_t) max_threads, curr_count/128+1);
#pragma omp parallel num_threads(num_threads)
{
node_t local_cnt = 0;
#pragma omp for nowait
for (node_t i = 0; i < curr_count; i++) {
node_t t = global_vector[global_curr_level_begin + i];
iterate_neighbor_rd(t, local_cnt);
visit_fw(t);
}
finish_thread_rd(local_cnt);
}
} else { // do it sequentially
node_t local_cnt = 0;
for (node_t i = 0; i < curr_count; i++) {
//node_t t = global_curr_level[i];
node_t t = global_vector[global_curr_level_begin + i];
iterate_neighbor_rd(t, local_cnt);
visit_fw(t);
}
finish_thread_rd(local_cnt);
}
break;
}
case ST_RD: {
if (use_multithread) { // do it in parallel
#pragma omp parallel
{
node_t local_cnt = 0;
#pragma omp for nowait schedule(dynamic,128)
for (node_t t = 0; t < G.max_nodes(); t++) {
if (visited_level[t] == curr_level) {
iterate_neighbor_rd(t, local_cnt);
visit_fw(t);
}
}
finish_thread_rd(local_cnt);
}
} else { // do it sequentially
node_t local_cnt = 0;
for (node_t t = 0; t < G.max_nodes(); t++) {
if (visited_level[t] == curr_level) {
iterate_neighbor_rd(t, local_cnt);
visit_fw(t);
}
}
finish_thread_rd(local_cnt);
}
break;
}
case ST_R2Q: {
if (use_multithread) { // do it in parallel
#pragma omp parallel
{
int tid = omp_get_thread_num();
#pragma omp for nowait schedule(dynamic,128)
for (node_t t = 0; t < G.max_nodes(); t++) {
if (visited_level[t] == curr_level) {
iterate_neighbor_que(t, tid);
visit_fw(t);
}
}
finish_thread_que(tid);
}
} else {
int tid = 0;
for (node_t t = 0; t < G.max_nodes(); t++) {
if (visited_level[t] == curr_level) {
iterate_neighbor_que(t, tid);
visit_fw(t);
}
}
finish_thread_que(tid);
}
break;
}
} // end of switch
do_end_of_level_fw();
is_done = get_next_state();
} // end of while
}
void do_bfs_reverse() {
// This function should be called only after do_bfs_forward has finished.
// assumption: small-world graph
level_t& level = curr_level;
while (true) {
node_t count = level_count[level];
//node_t* queue_ptr = level_start_ptr[level];
node_t* queue_ptr;
node_t begin_idx = level_queue_begin[level];
if (begin_idx == -1) {
queue_ptr = NULL;
} else {
queue_ptr = & (global_vector[begin_idx]);
}
if (queue_ptr == NULL) {
#pragma omp parallel if (use_multithread)
{
#pragma omp for nowait schedule(dynamic,128)
for (node_t i = 0; i < G.max_nodes(); i++) {
if (visited_level[i] != curr_level) continue;
visit_rv(i);
}
}
} else {
int num_threads = std::min((node_t) max_threads, count/128+1);
#pragma omp parallel num_threads(num_threads) if (use_multithread)
{
#pragma omp for nowait
for (node_t i = 0; i < count; i++) {
node_t u = queue_ptr[i];
visit_rv(u);
}
}
}
do_end_of_level_rv();
if (level == 0) break;
level--;
}
}
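// is_down_edge() below: in the ST_SMALL state, down-edges live in a hash
// set keyed by edge id. In the array-based states, an edge id encodes a
// (level, index) pair (or indexes a flat array under FORCE_L0), and edges
// on the writable level are looked up separately in down_edge_array_w.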
bool is_down_edge(edge_t idx) {
if (state == ST_SMALL)
return (down_edge_set->find(idx) != down_edge_set->end());
else {
#ifdef FORCE_L0
return down_edge_array[idx];
#else
size_t level = LL_EDGE_LEVEL(idx);
if (level == LL_WRITABLE_LEVEL) {
return down_edge_array_w[LL_EDGE_GET_WRITABLE(idx)->we_numerical_id];
}
return down_edge_array[level][LL_EDGE_INDEX(idx)];
#endif
}
}
protected:
virtual void visit_fw(node_t t)=0;
virtual void visit_rv(node_t t)=0;
virtual bool check_navigator(node_t t, edge_t nx)=0;
virtual void do_end_of_level_fw() {
}
virtual void do_end_of_level_rv() {
}
node_t get_root() {
return root;
}
level_t get_level(node_t t) {
// GCC extension: the small-graph state is expected to be rare
if (__builtin_expect((state == ST_SMALL), 0)) {
if (small_visited.find(t) == small_visited.end())
return __INVALID_LEVEL;
else
return small_visited[t];
} else {
return visited_level[t];
}
}
level_t get_curr_level() {
return curr_level;
}
private:
bool get_next_state() {
//const char* state_name[5] = {"SMALL","QUEUE","Q2R","RD","R2Q"};
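// State machine summary: start in ST_SMALL (hash-map visited set). Once
// the next frontier reaches THRESHOLD1, switch to the bitmap + queue
// state (ST_QUE). If the frontier also reaches THRESHOLD2 and grows to
// 5x the current level, transition via ST_Q2R to ST_RD, which scans all
// nodes instead of maintaining a queue. Once the frontier shrinks back
// to at most 2x the current level, return via ST_R2Q to ST_QUE.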
if (next_count == 0) return true; // BFS is finished
int next_state = state;
switch (state) {
case ST_SMALL:
if (next_count >= THRESHOLD1) {
prepare_que();
next_state = ST_QUE;
}
break;
case ST_QUE:
if ((next_count >= THRESHOLD2) && (next_count >= curr_count*5)) {
prepare_read();
next_state = ST_Q2R;
}
break;
case ST_Q2R:
next_state = ST_RD;
break;
case ST_RD:
if (next_count <= (2 * curr_count)) {
next_state = ST_R2Q;
}
break;
case ST_R2Q:
next_state = ST_QUE;
break;
}
finish_level(state);
state = next_state;
return false;
}
void finish_level(int state) {
if ((state == ST_RD) || (state == ST_Q2R)) {
// output queue is not valid
} else { // move output queue
//node_t* temp = &(global_next_level[next_count]);
//global_curr_level = global_next_level;
//global_next_level = temp;
global_curr_level_begin = global_next_level_begin;
global_next_level_begin = global_next_level_begin + next_count;
}
curr_count = next_count;
next_count = 0;
curr_level++;
// save 'new current' level status
level_count.push_back(curr_count);
if ((state == ST_RD) || (state == ST_Q2R)) {
//level_start_ptr.push_back(NULL);
level_queue_begin.push_back(-1);
} else {
//level_start_ptr.push_back(global_curr_level);
level_queue_begin.push_back(global_curr_level_begin);
}
}
void iter_begin(ll_edge_iterator& iter, node_t v) {
if (use_reverse_edge) {
G.in_iter_begin_fast(iter, v);
} else {
G.out_iter_begin(iter, v);
}
}
edge_t iter_next(ll_edge_iterator& iter) {
if (use_reverse_edge) {
return G.in_iter_next_fast(iter);
} else {
return G.out_iter_next(iter);
}
}
node_t get_node(ll_edge_iterator& iter) {
return iter.last_node;
}
void iterate_neighbor_small(node_t t) {
ll_edge_iterator iter; iter_begin(iter, t);
for (edge_t nx = iter_next(iter); nx != LL_NIL_EDGE; nx = iter_next(iter)) {
node_t u = get_node(iter);
// check visited
if (small_visited.find(u) == small_visited.end()) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
if (save_child) {
save_down_edge_small(nx);
}
small_visited[u] = curr_level + 1;
//global_next_level[next_count++] = u;
global_vector.push_back(u);
next_count++;
}
else if (save_child) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
if (small_visited[u] == (curr_level+1)){
save_down_edge_small(nx);
}
}
}
}
// should be used only when save_child is enabled
void save_down_edge_small(edge_t idx) {
down_edge_set->insert(idx);
}
void save_down_edge_large(edge_t idx) {
#ifdef FORCE_L0
down_edge_array[idx] = 1;
#else
size_t level = LL_EDGE_LEVEL(idx);
if (level == LL_WRITABLE_LEVEL) {
down_edge_array_w[LL_EDGE_GET_WRITABLE(idx)->we_numerical_id] = 1;
return; // do not fall through: LL_WRITABLE_LEVEL is not a valid index into down_edge_array (cf. is_down_edge())
}
down_edge_array[level][LL_EDGE_INDEX(idx)] = 1;
#endif
}
void prepare_que() {
global_vector.reserve(G.max_nodes());
// create bitmap and edges
if (visited_bitmap == NULL) {
visited_bitmap = new unsigned char[(G.max_nodes() + 7) / 8];
visited_level = new level_t[G.max_nodes()];
}
if (save_child) {
if (down_edge_array == NULL) {
#ifdef FORCE_L0
down_edge_array = new unsigned char [G.max_edges(0)];
#else
down_edge_array = new unsigned char* [G.num_levels()];
for (size_t i = 0; i < G.num_levels(); i++)
down_edge_array[i] = new unsigned char [G.max_edges(i)];
// Note: This makes sense only if the current graph is writable,
// but fortunately it is never accessed unless we are on the
// writable level
down_edge_array_w = down_edge_array[G.num_levels() - 1];
#endif
}
}
if (use_multithread) {
#pragma omp parallel
{
#pragma omp for nowait
for (node_t i = 0; i < (G.max_nodes() + 7) / 8; i++)
visited_bitmap[i] = 0;
#pragma omp for nowait
for (node_t i = 0; i < G.max_nodes(); i++)
visited_level[i] = __INVALID_LEVEL;
if (save_child) {
#ifdef FORCE_L0
#pragma omp for nowait
for (edge_t i = 0; i < G.max_edges(0); i++)
down_edge_array[i] = 0;
#else
#pragma omp for nowait
for (size_t i = 0; i < G.num_levels(); i++)
memset(down_edge_array[i], 0, sizeof(unsigned char) * G.max_edges(i));
#endif
}
}
} else {
for (node_t i = 0; i < (G.max_nodes() + 7) / 8; i++)
visited_bitmap[i] = 0;
for (node_t i = 0; i < G.max_nodes(); i++)
visited_level[i] = __INVALID_LEVEL;
if (save_child) {
#ifdef FORCE_L0
for (edge_t i = 0; i < G.max_edges(0); i++)
down_edge_array[i] = 0;
#else
for (size_t i = 0; i < G.num_levels(); i++)
memset(down_edge_array[i], 0, sizeof(unsigned char) * G.max_edges(i));
#endif
}
}
//typename std::unordered_map<node_t, level_t>::iterator II;
typename std::map<node_t, level_t>::iterator II;
for (II = small_visited.begin(); II != small_visited.end(); II++) {
node_t u = II->first;
level_t lev = II->second;
_ll_set_bit(visited_bitmap, u);
visited_level[u] = lev;
}
if (save_child) {
typename std::unordered_set<edge_t>::iterator J;
for (J = down_edge_set->begin(); J != down_edge_set->end(); J++) {
edge_t idx = *J;
#ifdef FORCE_L0
down_edge_array[idx] = 1;
#else
size_t level = LL_EDGE_LEVEL(idx);
if (level == LL_WRITABLE_LEVEL) {
down_edge_array_w[LL_EDGE_GET_WRITABLE(idx)->we_numerical_id] = 1;
continue; // writable edges are not indexed through down_edge_array
}
down_edge_array[level][LL_EDGE_INDEX(idx)] = 1;
#endif
}
}
}
void iterate_neighbor_que(node_t t, int tid) {
ll_edge_iterator iter; iter_begin(iter, t);
for (edge_t nx = iter_next(iter); nx != LL_NIL_EDGE; nx = iter_next(iter)) {
node_t u = get_node(iter);
assert(u >= 0 && u < G.max_nodes());
// check visited bitmap
// test & test-and-set
if (_ll_get_bit(visited_bitmap, u) == 0) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
bool re_check_result;
if (use_multithread) {
re_check_result = _ll_set_bit_atomic(visited_bitmap, u);
} else {
re_check_result = true;
_ll_set_bit(visited_bitmap, u);
}
if (save_child) {
save_down_edge_large(nx);
}
if (re_check_result) {
// add to local q
thread_local_next_level[tid].push_back(u);
visited_level[u] = (curr_level + 1);
}
}
else if (save_child) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
if (visited_level[u] == (curr_level +1)) {
save_down_edge_large(nx);
}
}
}
}
void finish_thread_que(int tid) {
node_t local_cnt = thread_local_next_level[tid].size();
// reserve a slot range in the global next-level queue, then bulk-copy
// the thread-local queue into it (prepare_que() reserved capacity for
// max_nodes up front)
if (local_cnt > 0) {
node_t old_idx = __sync_fetch_and_add(&next_count, local_cnt);
// copy to global vector
memcpy(&(global_vector[global_next_level_begin + old_idx]),
&(thread_local_next_level[tid][0]),
local_cnt * sizeof(node_t));
}
thread_local_next_level[tid].clear();
}
void prepare_read() {
// nothing to do
}
void iterate_neighbor_rd(node_t t, node_t& local_cnt) {
ll_edge_iterator iter; iter_begin(iter, t);
for (edge_t nx = iter_next(iter); nx != LL_NIL_EDGE; nx = iter_next(iter)) {
node_t u = get_node(iter);
// check visited bitmap
// test & test-and-set
if (_ll_get_bit(visited_bitmap, u) == 0) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
bool re_check_result;
if (use_multithread) {
re_check_result = _ll_set_bit_atomic(visited_bitmap, u);
} else {
re_check_result = true;
_ll_set_bit(visited_bitmap, u);
}
if (save_child) {
save_down_edge_large(nx);
}
if (re_check_result) {
// add to local q
visited_level[u] = curr_level + 1;
local_cnt++;
}
}
else if (save_child) {
if (has_navigator) {
if (check_navigator(u, nx) == false) continue;
}
if (visited_level[u] == (curr_level +1)) {
save_down_edge_large(nx);
}
}
}
}
void finish_thread_rd(node_t local_cnt) {
__sync_fetch_and_add(&next_count, local_cnt);
}
//-----------------------------------------------------
//-----------------------------------------------------
static const int ST_SMALL = 0;
static const int ST_QUE = 1;
static const int ST_Q2R = 2;
static const int ST_RD = 3;
static const int ST_R2Q = 4;
static const int THRESHOLD1 = 128; // single threaded
static const int THRESHOLD2 = 1024; // move to RD-based
// -2, not -1: curr_level - 1 can legitimately be -1 when curr_level == 0
static const level_t __INVALID_LEVEL = -2;
int state;
unsigned char* visited_bitmap; // bitmap
level_t* visited_level; // assumption: small-world graph
bool is_finished;
level_t curr_level;
node_t root;
Graph& G;
node_t curr_count;
node_t next_count;
//std::unordered_map<node_t, level_t> small_visited;
std::map<node_t, level_t> small_visited;
std::unordered_set<edge_t>* down_edge_set;
unsigned char* down_edge_array_w;
#ifdef FORCE_L0
unsigned char* down_edge_array;
#else
unsigned char** down_edge_array;
#endif
//node_t* global_next_level;
//node_t* global_curr_level;
//node_t* global_queue;
std::vector<node_t> global_vector;
node_t global_curr_level_begin;
node_t global_next_level_begin;
//std::vector<node_t*> level_start_ptr;
std::vector<node_t> level_queue_begin;
std::vector<node_t> level_count;
std::vector<node_t>* thread_local_next_level;
int max_threads;
};
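/*
 * Usage sketch (illustrative only, not part of the original header; the
 * class name below is hypothetical). A concrete BFS subclasses the
 * template and overrides the three hooks:
 *
 *   template <class Graph>
 *   class bfs_counter : public ll_bfs_template<Graph, short,
 *           true, false, false, false>
 *           // use_multithread, has_navigator, use_reverse_edge, save_child
 *   {
 *   public:
 *       bfs_counter(Graph& G)
 *           : ll_bfs_template<Graph, short, true, false, false, false>(G),
 *             count(0) {}
 *       node_t count;
 *   protected:
 *       virtual void visit_fw(node_t t) { __sync_fetch_and_add(&count, 1); }
 *       virtual void visit_rv(node_t t) { }
 *       virtual bool check_navigator(node_t t, edge_t nx) { return true; }
 *   };
 *
 *   // bfs_counter b(G); b.prepare(root); b.do_bfs_forward();
 */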
#endif
|
omp_nest_lock.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
omp_nest_lock_t lck;
int test_omp_nest_lock()
{
int nr_threads_in_single = 0;
int result = 0;
int nr_iterations = 0;
int i;
omp_init_nest_lock(&lck);
#pragma omp parallel shared(lck)
{
#pragma omp for
for(i = 0; i < LOOPCOUNT; i++) {
omp_set_nest_lock(&lck);
#pragma omp flush
nr_threads_in_single++;
#pragma omp flush
nr_iterations++;
nr_threads_in_single--;
result = result + nr_threads_in_single;
omp_unset_nest_lock(&lck);
}
}
omp_destroy_nest_lock(&lck);
return ((result == 0) && (nr_iterations == LOOPCOUNT));
}
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_nest_lock()) {
num_failed++;
}
}
return num_failed;
}
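/* A note on nestable locks (illustrative; the test above only takes the
   lock once per iteration): unlike a plain omp_lock_t, the thread that
   holds an omp_nest_lock_t may re-acquire it, and the lock is released
   only when the unset count matches the set count. A minimal sketch:

     omp_set_nest_lock(&lck);   // nesting count = 1
     omp_set_nest_lock(&lck);   // same thread, count = 2
                                // (a plain lock would deadlock here)
     omp_unset_nest_lock(&lck); // count = 1, still held
     omp_unset_nest_lock(&lck); // count = 0, released
*/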
|
region_2.tfm.c | void bar(int M, int *restrict T, int N, int *restrict A) {
#pragma omp parallel default(shared)
{
#pragma omp for
for (int I = 0; I < N; ++I) {
A[I] = I;
for (int J = 0; J < M; ++J)
A[I] = A[I] + T[J];
}
}
}
void foo(int N, int *A) {
int TSize = 4;
int T[4];
for (int I = 0; I < TSize; ++I)
T[I] = I;
#pragma spf region
{ bar(TSize, T, N, A); }
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2053
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;
size_t
*extent;
ssize_t
offset;
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *);
static ssize_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
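/*
  A minimal usage sketch (illustrative; not part of the MagickCore
  sources):

    DrawInfo *draw_info = AcquireDrawInfo();
    (void) CloneString(&draw_info->primitive, "line 10,10 90,90");
    (void) DrawImage(image, draw_info, exception);
    draw_info = DestroyDrawInfo(draw_info);
*/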
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
ExceptionInfo
*exception;
clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
exception=AcquireExceptionInfo();
if (draw_info->id != (char *) NULL)
(void) CloneString(&clone_info->id,draw_info->id);
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->compliance=draw_info->compliance;
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
exception);
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
sizeof(*clone_info->dash_pattern));
(void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
(x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
(size_t) number_stops*sizeof(*clone_info->gradient.stops));
}
clone_info->bounds=draw_info->bounds;
clone_info->fill_alpha=draw_info->fill_alpha;
clone_info->stroke_alpha=draw_info->stroke_alpha;
clone_info->element_reference=draw_info->element_reference;
clone_info->clip_path=draw_info->clip_path;
clone_info->clip_units=draw_info->clip_units;
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
MagickTrue,exception);
if (draw_info->composite_mask != (Image *) NULL)
clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
MagickTrue,exception);
clone_info->render=draw_info->render;
clone_info->debug=IsEventLogging();
exception=DestroyExceptionInfo(exception);
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o ConvertPathToPolygon() returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o path_info: Specifies a pointer to a PathInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
register ssize_t
i;
if (polygon_info->edges != (EdgeInfo *) NULL)
{
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
if (polygon_info->edges[i].points != (PointInfo *) NULL)
polygon_info->edges[i].points=(PointInfo *)
RelinquishMagickMemory(polygon_info->edges[i].points);
polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
polygon_info->edges);
}
return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
if (((p)-(q)) < 0.0) \
return(-1); \
if (((p)-(q)) > 0.0) \
return(1); \
}
register const PointInfo
*p,
*q;
/*
Edge sorting for right-handed coordinate system.
*/
p=((const EdgeInfo *) p_edge)->points;
q=((const EdgeInfo *) q_edge)->points;
DrawCompareEdge(p[0].y,q[0].y);
DrawCompareEdge(p[0].x,q[0].x);
DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
(q[1].x-q[0].x));
DrawCompareEdge(p[1].y,q[1].y);
DrawCompareEdge(p[1].x,q[1].x);
return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
PointInfo
point;
register ssize_t
i;
for (i=0; i < (ssize_t) (number_points >> 1); i++)
{
point=points[i];
points[i]=points[number_points-(i+1)];
points[number_points-(i+1)]=point;
}
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
ExceptionInfo *exception)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PolygonInfo *) NULL);
}
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
points=(PointInfo *) RelinquishMagickMemory(points);
return(DestroyPolygonInfo(polygon_info));
}
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
polygon_info->number_edges=edge;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
points=(PointInfo *) RelinquishMagickMemory(points);
return(DestroyPolygonInfo(polygon_info));
}
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
polygon_info->number_edges=edge+1;
points=(PointInfo *) NULL;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
polygon_info->number_edges=edge;
}
}
polygon_info->number_edges=edge;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges,
polygon_info->number_edges,sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
EdgeInfo
*edge_info;
edge_info=polygon_info->edges+i;
edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points,
edge_info->number_points,sizeof(*edge_info->points));
if (edge_info->points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonInfo(polygon_info));
}
}
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o ConvertPrimitiveToPath() returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
register const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
MagickBooleanType
closed_subpath;
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,
q;
register ssize_t
i,
n;
ssize_t
coordinates,
start;
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case AlphaPrimitive:
case ColorPrimitive:
case ImagePrimitive:
case PointPrimitive:
case TextPrimitive:
return((PathInfo *) NULL);
default:
break;
}
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PathInfo *) NULL);
}
coordinates=0;
closed_subpath=MagickFalse;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
/*
New subpath.
*/
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
closed_subpath=primitive_info[i].closed_subpath;
}
coordinates--;
if ((code == MoveToCode) || (coordinates <= 0) ||
(fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
{
/*
Eliminate duplicate points.
*/
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue; /* next point in current subpath */
if (closed_subpath != MagickFalse)
{
closed_subpath=MagickFalse;
continue;
}
/*
The subpath is not closed: mark its start as open and add a ghostline back to its first point p.
*/
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
sizeof(*path_info));
return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->id != (char *) NULL)
draw_info->id=DestroyString(draw_info->id);
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
if (draw_info->composite_mask != (Image *) NULL)
draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
register double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= MagickEpsilon)
{
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -MagickEpsilon)
{
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine top and bottom edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= MagickEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -MagickEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
double
determinant;
determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
affine->ry);
inverse_affine.sx=determinant*affine->sy;
inverse_affine.rx=determinant*(-affine->rx);
inverse_affine.ry=determinant*(-affine->ry);
inverse_affine.sy=determinant*affine->sx;
inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
inverse_affine.ry;
inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
inverse_affine.sy;
return(inverse_affine);
}
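/*
  A worked note on the inverse (added commentary, not original source):
  MagickCore affine transforms map x' = sx*x + ry*y + tx and
  y' = rx*x + sy*y + ty, i.e. linear part A = [sx ry; rx sy] with
  translation t = (tx, ty). With det(A) = sx*sy - rx*ry,
  A^(-1) = (1/det) [sy -ry; -rx sx], and the inverse translation is
  -A^(-1)*t, which is exactly what the tx/ty expressions above compute.
  PerceptibleReciprocal() guards against a singular (det ~= 0) matrix.
*/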
MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
extent[4],
min,
max;
register ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickCoreSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
PointInfo
point;
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetPixelInfo(image,&zero);
start=(ssize_t) ceil(edge.y1-0.5);
stop=(ssize_t) floor(edge.y2+0.5);
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,image,stop-start,1)
#endif
for (y=start; y <= stop; y++)
{
PixelInfo
composite,
pixel;
PointInfo
point;
register ssize_t
x;
register Quantum
*magick_restrict q;
SegmentInfo
inverse_edge;
ssize_t
x_offset;
if (status == MagickFalse)
continue;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
1,exception);
if (q == (Quantum *) NULL)
continue;
pixel=zero;
composite=zero;
x_offset=0;
for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
point.x,point.y,&pixel,exception);
if (status == MagickFalse)
break;
GetPixelInfoPixel(image,q,&composite);
CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
&composite);
SetPixelViaPixelInfo(image,&composite,q);
x_offset++;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
const DrawInfo *draw_info,const PolygonInfo *polygon_info,
ExceptionInfo *exception)
{
double
mid;
DrawInfo
*clone_info;
MagickStatusType
status;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
(void) memset(primitive_info,0,sizeof(primitive_info));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
resolution.x=96.0;
resolution.y=96.0;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
clone_info->stroke_width/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
exception);
else
status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
break;
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
if (status == MagickFalse)
break;
}
if (i < (ssize_t) polygon_info->number_edges)
{
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
}
status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
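/*
  A minimal sketch (hypothetical, not part of the rendering path) of how a
  caller such as DrawPolygonPrimitive() might invoke the debugging helper
  above; `image', `draw_info', `polygon_info', and `exception' are assumed
  to already be in scope.
*/
#if 0
if (image->debug != MagickFalse)
  (void) DrawBoundingRectangles(image,draw_info,polygon_info,exception);
#endif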
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
const char
*clip_path;
Image
*clipping_mask;
MagickBooleanType
status;
clip_path=GetImageArtifact(image,id);
if (clip_path == (const char *) NULL)
return(MagickFalse);
clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
exception);
if (clipping_mask == (Image *) NULL)
return(MagickFalse);
status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
clipping_mask=DestroyImage(clipping_mask);
return(status);
}
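/*
  Hedged usage sketch: DrawClipPath() resolves `id' through the image
  artifact table, so the clip path must be registered first.  The identifier
  and MVG string below are illustrative only.
*/
#if 0
(void) SetImageArtifact(image,"wheel","circle 60,60 60,20");
if (DrawClipPath(image,draw_info,"wheel",exception) == MagickFalse)
  CatchException(exception);
#endif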
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *clip_path,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
Image
*clip_mask,
*separate_mask;
MagickStatusType
status;
/*
Draw a clip path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(clip_mask));
status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
status=QueryColorCompliance("#0000",AllCompliance,
&clip_mask->background_color,exception);
clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
clip_mask->background_color.alpha_trait=BlendPixelTrait;
status=SetImageBackgroundColor(clip_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,clip_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
if (clone_info->clip_mask != (char *) NULL)
clone_info->clip_mask=DestroyString(clone_info->clip_mask);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
clone_info->clip_path=MagickTrue;
status=RenderMVGContent(clip_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
clip_mask=DestroyImage(clip_mask);
clip_mask=separate_mask;
status=NegateImage(clip_mask,MagickFalse,exception);
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
}
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
return(clip_mask);
}
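/*
  Sketch of the typical call sequence, mirroring DrawClipPath() above: render
  the MVG path to a grayscale mask, then attach it as the image write mask.
  The identifier and path are illustrative.
*/
#if 0
{
  Image
    *mask;

  mask=DrawClippingMask(image,draw_info,"clip-1","circle 60,60 60,20",
    exception);
  if (mask != (Image *) NULL)
    {
      (void) SetImageMask(image,WritePixelMask,mask,exception);
      mask=DestroyImage(mask);
    }
}
#endif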
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *mask_path,ExceptionInfo *exception)
{
Image
*composite_mask,
*separate_mask;
DrawInfo
*clone_info;
MagickStatusType
status;
/*
Draw a mask path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(composite_mask));
status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
exception);
status=QueryColorCompliance("#0000",AllCompliance,
&composite_mask->background_color,exception);
composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
composite_mask->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(composite_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,mask_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
status=RenderMVGContent(composite_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
composite_mask=DestroyImage(composite_mask);
composite_mask=separate_mask;
status=NegateImage(composite_mask,MagickFalse,exception);
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
}
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
return(composite_mask);
}
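/*
  In MVG, DrawCompositeMask() is reached through the `mask' keyword handled
  in RenderMVGContent() below; the mask path must exist as a macro in the
  same document.  An illustrative fragment:

    push mask "fade"
      rectangle 0,0 120,120
    pop mask
    mask "fade"
*/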
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
double
length,
maximum_length,
offset,
scale,
total_length;
DrawInfo
*clone_info;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
register double
dx,
dy;
register ssize_t
i;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+32UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
(void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
sizeof(*dash_polygon));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*draw_info->dash_pattern[0];
offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
scale*draw_info->dash_offset : 0.0;
j=1;
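/*
  Consume the dash offset first: walk the scaled dash pattern until the
  offset is spent so the first rendered dash can begin mid-pattern.
*/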
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*draw_info->dash_pattern[n];
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (double) (MaxBezierCoordinates >> 2))
continue;
if (fabs(length) < MagickEpsilon)
{
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
for (total_length=0.0; (length >= 0.0) &&
  (maximum_length >= (total_length+length)); )
{
total_length+=length;
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
j=1;
}
else
{
if ((j+1) > (ssize_t) number_vertices)
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
if (status == MagickFalse)
break;
}
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((status != MagickFalse) && (total_length < maximum_length) &&
((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=MagickEpsilon;
dash_polygon[j].point.y+=MagickEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
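/*
  A worked example of the dash walk above (illustrative numbers): with
  dash_pattern={5,3,0} and scale=1, even indices (n=0) contribute 5-pixel
  "on" runs that are stroked via DrawStrokePolygon(), odd indices (n=1)
  contribute 3-pixel gaps that only reposition the dash start, and the
  leftover `length' carries into the next segment so the pattern stays
  continuous across vertices.
*/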
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
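/*
  For the linear case above the algebra collapses to the scalar projection
  of q onto the gradient vector p:

    offset = (p.q)*|q|/(|p||q|) = (p.q)/|p|

  DrawGradientImage() then multiplies by 1/|p| (the gradient length), giving
  the normalized position t = (p.q)/|p|^2 in [0,1] along the vector.
*/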
static int StopInfoCompare(const void *x,const void *y)
{
StopInfo
*stop_1,
*stop_2;
stop_1=(StopInfo *) x;
stop_2=(StopInfo *) y;
if (stop_1->offset > stop_2->offset)
return(1);
if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
return(0);
return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
const DrawInfo *draw_info,ExceptionInfo *exception)
{
CacheView
*image_view;
const GradientInfo
*gradient;
const SegmentInfo
*gradient_vector;
double
length;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
point;
RectangleInfo
bounding_box;
ssize_t
y;
/*
Draw linear or radial gradient on image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
gradient=(&draw_info->gradient);
qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
StopInfoCompare);
gradient_vector=(&gradient->gradient_vector);
point.x=gradient_vector->x2-gradient_vector->x1;
point.y=gradient_vector->y2-gradient_vector->y1;
length=sqrt(point.x*point.x+point.y*point.y);
bounding_box=gradient->bounding_box;
status=MagickTrue;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
{
double
alpha,
offset;
PixelInfo
composite,
pixel;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
j;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
composite=zero;
offset=GetStopColorOffset(gradient,0,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (gradient->spread)
{
case UndefinedSpread:
case PadSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if ((offset < 0.0) || (i == 0))
composite=gradient->stops[0].color;
else
if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case ReflectSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
if (offset < 0.0)
offset=(-offset);
if ((ssize_t) fmod(offset,2.0) == 0)
offset=fmod(offset,1.0);
else
offset=1.0-fmod(offset,1.0);
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case RepeatSpread:
{
double
repeat;
MagickBooleanType
antialias;
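/*
  `antialias' marks pixels within one pixel of the pattern period; the stop
  blend below then interpolates between the last and first stops to soften
  the seam where the repeating gradient wraps.
*/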
antialias=MagickFalse;
repeat=0.0;
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type == LinearGradient)
{
repeat=fmod(offset,length);
if (repeat < 0.0)
repeat=length-fmod(-repeat,length);
else
repeat=fmod(offset,length);
antialias=(repeat < length) && ((repeat+1.0) > length) ?
MagickTrue : MagickFalse;
offset=PerceptibleReciprocal(length)*repeat;
}
else
{
repeat=fmod(offset,gradient->radius);
if (repeat < 0.0)
repeat=gradient->radius-fmod(-repeat,gradient->radius);
else
repeat=fmod(offset,gradient->radius);
antialias=repeat+1.0 > gradient->radius ? MagickTrue :
MagickFalse;
offset=repeat/gradient->radius;
}
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
if (antialias != MagickFalse)
{
if (gradient->type == LinearGradient)
alpha=length-repeat;
else
alpha=gradient->radius-repeat;
i=0;
j=(ssize_t) gradient->number_stops-1L;
}
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
}
CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
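/*
  Illustrative MVG that reaches DrawGradientImage(): `push gradient' (parsed
  in RenderMVGContent() below) stores the stop list and geometry as image
  artifacts, and the named gradient is then referenced as a fill pattern:

    push gradient "grad1" linear 0,0 0,120
      stop-color red 0.0
      stop-color blue 1.0
    pop gradient
    fill "grad1"
    rectangle 0,0 120,120
*/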
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
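/*
  A minimal caller sketch (assumes a valid `image' and `exception' are in
  scope); DrawImage() is the public entry point that delegates to
  RenderMVGContent() defined below.
*/
#if 0
{
  DrawInfo
    *draw_info;

  draw_info=AcquireDrawInfo();
  (void) CloneString(&draw_info->primitive,"fill red circle 60,60 60,20");
  (void) DrawImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
}
#endif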
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
const size_t pad)
{
double
extent;
size_t
quantum;
/*
Check if there is enough storage for drawing primitives.
*/
extent=(double) mvg_info->offset+pad+PrimitiveExtentPad+1;
quantum=sizeof(**mvg_info->primitive_info);
if (extent <= (double) *mvg_info->extent)
return(MagickTrue);
*mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
*mvg_info->primitive_info,(size_t) extent,quantum);
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
{
register ssize_t
i;
*mvg_info->extent=(size_t) extent;
for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
(*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
return(MagickTrue);
}
/*
Reallocation failed, allocate a primitive to facilitate unwinding.
*/
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
*mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
*mvg_info->primitive_info);
*mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
PrimitiveExtentPad*quantum);
(void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
*mvg_info->extent=1;
return(MagickFalse);
}
static inline double GetDrawValue(const char *magick_restrict string,
char **magick_restrict sentinal)
{
char
**magick_restrict q;
double
value;
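/*
  Parse a floating-point value in the C locale; NaN or magnitudes that could
  overflow later ssize_t arithmetic are clamped to 0.0.
*/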
q=sentinal;
value=InterpretLocaleValue(string,q);
if ((IsNaN(value) != 0) || (value < -((double) SSIZE_MAX-512.0)) ||
(value > ((double) SSIZE_MAX-512.0)))
return(0.0);
sentinal=q;
return(value);
}
static int MVGMacroCompare(const void *target,const void *source)
{
const char
*p,
*q;
p=(const char *) target;
q=(const char *) source;
return(strcmp(p,q));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
char
*macro,
*token;
const char
*q;
size_t
extent;
SplayTreeInfo
*macros;
/*
Scan graphic primitives for definitions and classes.
*/
if (primitive == (const char *) NULL)
return((SplayTreeInfo *) NULL);
macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
RelinquishMagickMemory);
macro=AcquireString(primitive);
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
for (q=primitive; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare("push",token) == 0)
{
register const char
*end,
*start;
(void) GetNextToken(q,&q,extent,token);
if (*q == '"')
{
char
name[MagickPathExtent];
const char
*p;
ssize_t
n;
/*
Named macro (e.g. push graphic-context "wheel").
*/
(void) GetNextToken(q,&q,extent,token);
start=q;
end=q;
(void) CopyMagickString(name,token,MagickPathExtent);
n=1;
for (p=q; *p != '\0'; )
{
if (GetNextToken(p,&p,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare(token,"pop") == 0)
{
end=p-strlen(token)-1;
n--;
}
if (LocaleCompare(token,"push") == 0)
n++;
if ((n == 0) && (end > start))
{
/*
Extract macro.
*/
(void) GetNextToken(p,&p,extent,token);
(void) CopyMagickString(macro,start,(size_t) (end-start));
(void) AddValueToSplayTree(macros,ConstantString(name),
ConstantString(macro));
break;
}
}
}
}
}
token=DestroyString(token);
macro=DestroyString(macro);
return(macros);
}
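/*
  An illustrative fragment matched by the scanner above: the body between
  the quoted name and its balancing `pop' is stored under "wheel" and can
  later be replayed with `use "wheel"' or referenced by the `class',
  `clip-path', or `mask' keywords:

    push graphic-context "wheel"
      fill red
      circle 60,60 60,20
    pop graphic-context
*/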
static inline MagickBooleanType IsPoint(const char *point)
{
char
*p;
double
value;
value=GetDrawValue(point,&p);
return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse :
MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
const PointInfo point)
{
primitive_info->coordinates=1;
primitive_info->closed_subpath=MagickFalse;
primitive_info->point=point;
return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if ((mvg_class != (const char *) NULL) && (p > primitive))
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
if (LocaleCompare("currentColor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
GetDrawValue(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(GetDrawValue(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(GetDrawValue(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(GetDrawValue(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
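    /*
      A command above may have set a local affine transform; concatenate it
      with the current transform, i.e. new = current * affine, running the
      translation (tx,ty) through the linear part of current.
    */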
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
    /*
      Compute the primitive's bounding box (later used to circumscribe it).
    */
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(BezierQuantum*(double) primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (108.0*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=GetDrawValue(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
double
dx,
dy,
maximum_length;
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (MaxBezierCoordinates/100.0))
ThrowPointExpectedException(keyword,exception);
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates < 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (status == 0)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
/*
Sanity check.
*/
status&=CheckPrimitiveExtent(&mvg_info,(size_t)
ExpandAffine(&graphic_context[n]->affine));
if (status == 0)
break;
status&=CheckPrimitiveExtent(&mvg_info,(size_t)
graphic_context[n]->stroke_width);
if (status == 0)
break;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
return(RenderMVGContent(image,draw_info,0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() creates a pattern image from the named pattern's
% registered geometry and renders the pattern's MVG path into it.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
% o pattern: the pattern image.
%
% o exception: return any errors or warnings in this structure.
%
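% A hypothetical usage sketch (the artifact values are illustrative; in
% practice RenderMVGContent() registers them when it parses a pattern
% definition):
%
%     Image
%       *pattern;
%
%     pattern=(Image *) NULL;
%     (void) SetImageArtifact(image,"hatch","stroke black line 0,0 8,8");
%     (void) SetImageArtifact(image,"hatch-geometry","8x8");
%     status=DrawPatternPath(image,draw_info,"hatch",&pattern,exception);
%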
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
const DrawInfo *draw_info,const char *name,Image **pattern,
ExceptionInfo *exception)
{
char
property[MagickPathExtent];
const char
*geometry,
*path,
*type;
DrawInfo
*clone_info;
ImageInfo
*image_info;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
assert(name != (const char *) NULL);
(void) FormatLocaleString(property,MagickPathExtent,"%s",name);
path=GetImageArtifact(image,property);
if (path == (const char *) NULL)
return(MagickFalse);
(void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
geometry=GetImageArtifact(image,property);
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((*pattern) != (Image *) NULL)
*pattern=DestroyImage(*pattern);
image_info=AcquireImageInfo();
image_info->size=AcquireString(geometry);
*pattern=AcquireImage(image_info,exception);
image_info=DestroyImageInfo(image_info);
(void) QueryColorCompliance("#00000000",AllCompliance,
&(*pattern)->background_color,exception);
(void) SetImageBackgroundColor(*pattern,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"begin pattern-path %s %s",name,geometry);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern);
(void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
type=GetImageArtifact(image,property);
if (type != (const char *) NULL)
clone_info->gradient.type=(GradientType) ParseCommandOption(
MagickGradientOptions,MagickFalse,type);
(void) CloneString(&clone_info->primitive,path);
status=RenderMVGContent(*pattern,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
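% A hypothetical sketch of the primitive array this method consumes (a
% closed triangle; the array is terminated by an UndefinedPrimitive
% element):
%
%     PrimitiveInfo
%       triangle[5];
%     register ssize_t
%       i;
%
%     (void) memset(triangle,0,sizeof(triangle));
%     for (i=0; i < 4; i++)
%       triangle[i].primitive=PolygonPrimitive;
%     triangle[0].coordinates=4;
%     triangle[0].closed_subpath=MagickTrue;
%     triangle[0].point.x=10.0;  triangle[0].point.y=10.0;
%     triangle[1].point.x=90.0;  triangle[1].point.y=10.0;
%     triangle[2].point.x=50.0;  triangle[2].point.y=80.0;
%     triangle[3].point=triangle[0].point;
%     triangle[4].primitive=UndefinedPrimitive;
%     status=DrawPolygonPrimitive(image,draw_info,triangle,exception);
%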
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
register ssize_t
i;
assert(polygon_info != (PolygonInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (polygon_info[i] != (PolygonInfo *) NULL)
polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
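  /*
    Allocate one PolygonInfo per worker thread: slot 0 receives the polygon
    converted from the primitive path, and every other slot gets a deep copy
    of its edge table so each thread's per-edge scanline state (scanline,
    highwater) can advance independently during rasterization.
  */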
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PolygonInfo **) NULL);
}
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info,exception);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
polygon_info[0]=ConvertPathToPolygon(path_info,exception);
if (polygon_info[0] == (PolygonInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
for (i=1; i < (ssize_t) number_threads; i++)
{
EdgeInfo
*edge_info;
register ssize_t
j;
polygon_info[i]=(PolygonInfo *) AcquireMagickMemory(
sizeof(*polygon_info[i]));
if (polygon_info[i] == (PolygonInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
polygon_info[i]->number_edges=0;
edge_info=polygon_info[0]->edges;
polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory(
polygon_info[0]->number_edges,sizeof(*edge_info));
if (polygon_info[i]->edges == (EdgeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
(void) memcpy(polygon_info[i]->edges,edge_info,
polygon_info[0]->number_edges*sizeof(*edge_info));
for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
polygon_info[i]->edges[j].points=(PointInfo *) NULL;
polygon_info[i]->number_edges=polygon_info[0]->number_edges;
for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
{
edge_info=polygon_info[0]->edges+j;
      polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory(
        edge_info->number_points,sizeof(*edge_info->points));
if (polygon_info[i]->edges[j].points == (PointInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(DestroyPolygonThreadSet(polygon_info));
}
(void) memcpy(polygon_info[i]->edges[j].points,edge_info->points,
edge_info->number_points*sizeof(*edge_info->points));
}
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge)
{
assert(edge < (ssize_t) polygon_info->number_edges);
polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
polygon_info->edges[edge].points);
polygon_info->number_edges--;
if (edge < (ssize_t) polygon_info->number_edges)
(void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
(size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
return(polygon_info->number_edges);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
const ssize_t y,double *stroke_alpha)
{
double
alpha,
beta,
distance,
subpath_alpha;
PointInfo
delta;
register const PointInfo
*q;
register EdgeInfo
*p;
register ssize_t
i;
ssize_t
j,
winding_number;
/*
Compute fill & stroke opacity for this (x,y) point.
*/
*stroke_alpha=0.0;
subpath_alpha=0.0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= (p->bounds.y1-mid-0.5))
break;
if ((double) y > (p->bounds.y2+mid+0.5))
{
(void) DestroyEdge(polygon_info,j);
continue;
}
if (((double) x <= (p->bounds.x1-mid-0.5)) ||
((double) x > (p->bounds.x2+mid+0.5)))
continue;
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
{
if ((double) y <= (p->points[i-1].y-mid-0.5))
break;
if ((double) y > (p->points[i].y+mid+0.5))
continue;
if (p->scanline != (double) y)
{
p->scanline=(double) y;
p->highwater=(size_t) i;
}
/*
Compute distance between a point and an edge.
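        The edge segment runs from q to q+1.  Here beta is the unnormalized
        projection of (x,y) onto the segment: beta <= 0 selects the squared
        distance to q, beta >= |delta|^2 selects the squared distance to
        q+1, and otherwise the squared perpendicular distance follows from
        the cross product.  Squared distances avoid a sqrt in the common
        case.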
*/
q=p->points+i-1;
delta.x=(q+1)->x-q->x;
delta.y=(q+1)->y-q->y;
beta=delta.x*(x-q->x)+delta.y*(y-q->y);
if (beta <= 0.0)
{
delta.x=(double) x-q->x;
delta.y=(double) y-q->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=delta.x*delta.x+delta.y*delta.y;
if (beta >= alpha)
{
delta.x=(double) x-(q+1)->x;
delta.y=(double) y-(q+1)->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=PerceptibleReciprocal(alpha);
beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
distance=alpha*beta*beta;
}
}
/*
Compute stroke & subpath opacity.
*/
beta=0.0;
if (p->ghostline == MagickFalse)
{
alpha=mid+0.5;
if ((*stroke_alpha < 1.0) &&
(distance <= ((alpha+0.25)*(alpha+0.25))))
{
alpha=mid-0.5;
if (distance <= ((alpha+0.25)*(alpha+0.25)))
*stroke_alpha=1.0;
else
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt((double) distance);
alpha=beta-mid-0.5;
if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
*stroke_alpha=(alpha-0.25)*(alpha-0.25);
}
}
}
if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
continue;
if (distance <= 0.0)
{
subpath_alpha=1.0;
continue;
}
if (distance > 1.0)
continue;
if (fabs(beta) < MagickEpsilon)
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt(distance);
}
alpha=beta-1.0;
if (subpath_alpha < (alpha*alpha))
subpath_alpha=alpha*alpha;
}
}
/*
Compute fill opacity.
*/
if (fill == MagickFalse)
return(0.0);
if (subpath_alpha >= 1.0)
return(1.0);
/*
Determine winding number.
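    Cast a horizontal ray through (x,y): an edge lying wholly to the left
    of x contributes its direction outright, while an edge straddling x is
    resolved with an orientation test against the segment that crosses the
    scanline.  The even-odd or nonzero fill rule is then applied to the
    accumulated winding number.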
*/
winding_number=0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= p->bounds.y1)
break;
if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
continue;
if ((double) x > p->bounds.x2)
{
winding_number+=p->direction ? 1 : -1;
continue;
}
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) (p->number_points-1); i++)
if ((double) y <= p->points[i].y)
break;
q=p->points+i-1;
if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
winding_number+=p->direction ? 1 : -1;
}
if (fill_rule != NonZeroRule)
{
if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
return(1.0);
}
else
if (MagickAbsoluteValue(winding_number) != 0)
return(1.0);
return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
fill,
status;
double
mid;
PolygonInfo
**magick_restrict polygon_info;
register EdgeInfo
*p;
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
start_y,
stop_y,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
assert(primitive_info != (PrimitiveInfo *) NULL);
if (primitive_info->coordinates <= 1)
return(MagickTrue);
/*
Compute bounding box.
*/
polygon_info=AcquirePolygonThreadSet(primitive_info,exception);
if (polygon_info == (PolygonInfo **) NULL)
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
fill=(primitive_info->method == FillToBorderMethod) ||
(primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
bounds=polygon_info[0]->edges[0].bounds;
artifact=GetImageArtifact(image,"draw:render-bounding-rectangles");
if (IsStringTrue(artifact) != MagickFalse)
(void) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
{
p=polygon_info[0]->edges+i;
if (p->bounds.x1 < bounds.x1)
bounds.x1=p->bounds.x1;
if (p->bounds.y1 < bounds.y1)
bounds.y1=p->bounds.y1;
if (p->bounds.x2 > bounds.x2)
bounds.x2=p->bounds.x2;
if (p->bounds.y2 > bounds.y2)
bounds.y2=p->bounds.y2;
}
bounds.x1-=(mid+1.0);
bounds.y1-=(mid+1.0);
bounds.x2+=(mid+1.0);
bounds.y2+=(mid+1.0);
if ((bounds.x1 >= (double) image->columns) ||
(bounds.y1 >= (double) image->rows) ||
(bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(MagickTrue); /* virtual polygon */
}
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x1;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y1;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x2;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y2;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
if ((primitive_info->coordinates == 1) ||
(polygon_info[0]->number_edges == 0))
{
/*
Draw point.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
x=start_x;
q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for ( ; x <= stop_x; x++)
{
if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
(y == (ssize_t) ceil(primitive_info->point.y-0.5)))
{
GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-polygon");
return(status);
}
/*
Draw polygon or line.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
1),1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=start_x; x <= stop_x; x++)
{
double
fill_alpha,
stroke_alpha;
PixelInfo
fill_color,
stroke_color;
/*
Fill and/or stroke.
*/
fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
x,y,&stroke_alpha);
if (draw_info->stroke_antialias == MagickFalse)
{
fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0;
}
GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
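% A hypothetical sketch drawing a single point (the array is terminated by
% an UndefinedPrimitive element):
%
%     PrimitiveInfo
%       primitive_info[2];
%
%     (void) memset(primitive_info,0,sizeof(primitive_info));
%     primitive_info[0].primitive=PointPrimitive;
%     primitive_info[0].coordinates=1;
%     primitive_info[0].point.x=10.0;
%     primitive_info[0].point.y=20.0;
%     primitive_info[1].primitive=UndefinedPrimitive;
%     status=DrawPrimitive(image,draw_info,primitive_info,exception);
%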
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
point,
q;
register ssize_t
i,
x;
ssize_t
coordinates,
y;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
point=primitive_info[i].point;
if ((fabs(q.x-point.x) >= MagickEpsilon) ||
(fabs(q.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= MagickEpsilon) ||
(fabs(p.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
status=MagickTrue;
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
(IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
status&=SetImageColorspace(image,sRGBColorspace,exception);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
exception);
status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
exception);
}
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
if (image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
ChannelType
channel_mask;
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
(void) SetImageChannelMask(image,channel_mask);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetPixelInfo(image,&pixel);
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MagickPathExtent];
Image
*composite_image,
*composite_images;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
composite_images=(Image *) NULL;
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_images=ReadInlineImage(clone_info,primitive_info->text,
exception);
else
if (*primitive_info->text != '\0')
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
status&=SetImageInfo(clone_info,0,exception);
if ((LocaleNCompare(clone_info->magick,"http",4) == 0) ||
(LocaleCompare(clone_info->magick,"mpri") == 0))
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
if (clone_info->size != (char *) NULL)
clone_info->size=DestroyString(clone_info->size);
if (clone_info->extract != (char *) NULL)
clone_info->extract=DestroyString(clone_info->extract);
if (*clone_info->filename != '\0')
composite_images=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
composite_image=RemoveFirstImageFromList(&composite_images);
composite_images=DestroyImageList(composite_images);
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
/*
Resize image.
*/
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
status&=TransformImage(&composite_image,(char *) NULL,
composite_geometry,exception);
}
if (composite_image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
exception);
if (draw_info->alpha != OpaqueAlpha)
status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
if ((draw_info->compose == OverCompositeOp) ||
(draw_info->compose == SrcOverCompositeOp))
status&=DrawAffineImage(image,composite_image,&affine,exception);
else
status&=CompositeImage(image,composite_image,draw_info->compose,
MagickTrue,geometry.x,geometry.y,exception);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
PixelInfo
fill_color;
register Quantum
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&fill_color,exception);
CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
GetPixelAlpha(image,q),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.alpha != (Quantum) TransparentAlpha))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
break;
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
double
x,
y;
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
closed_path=primitive_info[0].closed_subpath;
i=(ssize_t) primitive_info[0].coordinates;
x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
closed_path=MagickTrue;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
exception);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
break;
}
}
image_view=DestroyCacheView(image_view);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
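% Internally, each subpath's stroked outline is traced with
% TraceStrokePolygon() and filled with the stroke color via
% DrawPolygonPrimitive(); round caps on open subpaths are added separately
% by DrawRoundLinecap().
%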
*/
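/*
  DrawRoundLinecap() draws a degenerate four-point polygon (its points are
  offset by only a few epsilons) at a subpath endpoint; stroking it with the
  current stroke width renders a disk, which produces the round cap.
*/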
static MagickBooleanType DrawRoundLinecap(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
PrimitiveInfo
linecap[5];
register ssize_t
i;
for (i=0; i < 4; i++)
linecap[i]=(*primitive_info);
linecap[0].coordinates=4;
linecap[1].point.x+=2.0*MagickEpsilon;
linecap[2].point.x+=2.0*MagickEpsilon;
linecap[2].point.y+=2.0*MagickEpsilon;
linecap[3].point.y+=2.0*MagickEpsilon;
linecap[4].primitive=UndefinedPrimitive;
return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
register const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
if (p->coordinates == 1)
continue;
stroke_polygon=TraceStrokePolygon(draw_info,p,exception);
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
status=0;
break;
}
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
if (status == 0)
break;
q=p+p->coordinates-1;
closed_path=p->closed_subpath;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
status&=DrawRoundLinecap(image,draw_info,p,exception);
status&=DrawRoundLinecap(image,draw_info,q,exception);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
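% A hypothetical sketch composing a 45-degree rotation on top of the
% identity (sx/rx/ry/sy form the 2x2 linear part, tx/ty the translation):
%
%     AffineMatrix
%       affine;
%
%     GetAffineMatrix(&affine);
%     affine.sx=cos(DegreesToRadians(45.0));
%     affine.rx=sin(DegreesToRadians(45.0));
%     affine.ry=(-sin(DegreesToRadians(45.0)));
%     affine.sy=cos(DegreesToRadians(45.0));
%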
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) memset(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
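% A hypothetical usage sketch (most callers obtain a fully managed
% structure via CloneDrawInfo() or AcquireDrawInfo(), which call this
% method internally):
%
%     DrawInfo
%       draw_info;
%
%     GetDrawInfo(image_info,&draw_info);
%     draw_info.stroke_width=2.0;
%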
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
char
*next_token;
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) memset(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
(void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
exception);
draw_info->stroke_antialias=clone_info->antialias;
draw_info->stroke_width=1.0;
draw_info->fill_rule=EvenOddRule;
draw_info->alpha=OpaqueAlpha;
draw_info->fill_alpha=OpaqueAlpha;
draw_info->stroke_alpha=OpaqueAlpha;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
draw_info->pointsize=12.0;
draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
draw_info->compose=OverCompositeOp;
draw_info->render=MagickTrue;
draw_info->clip_path=MagickFalse;
draw_info->debug=IsEventLogging();
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
if (fabs(clone_info->pointsize) >= MagickEpsilon)
draw_info->pointsize=clone_info->pointsize;
draw_info->border_color=clone_info->border_color;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
else
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"family");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->family,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=GetDrawValue(option,&next_token);
option=GetImageOption(clone_info,"style");
if (option != (const char *) NULL)
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
exception);
option=GetImageOption(clone_info,"weight");
if (option != (const char *) NULL)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(option);
draw_info->weight=(size_t) weight;
}
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickCoreSignature;
clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the binomial coefficient n!/(k!*(n-k)!) for the
% pair (n,k).
%
% The format of the Permutate method is:
%
% double Permutate(const ssize_t n,const ssize_t k)
%
% A description of each parameter follows:
%
% o n: the total number of items.
%
% o k: the number of items chosen per selection.
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
double
r;
register ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
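/*
A worked example: Permutate(5,2) runs the first loop for i=3..5
(r=60) and the second for i=1..3 (r=60/6=10), i.e. 5!/(2!*3!)=10.
TraceBezier() below calls Permutate(number_coordinates-1,i) to obtain
the Bernstein basis coefficients for its curve evaluation.
*/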
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
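/*
A shared convention of the Trace*() methods below: each writes its
vertices with TracePoint() starting at
(*mvg_info->primitive_info)+mvg_info->offset, records the vertex
count in primitive_info->coordinates, flags closed_subpath, and
back-fills the primitive type over every emitted point.
*/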
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo degrees)
{
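/*
Interpret (start,end) as opposite corners: center the ellipse on
their midpoint with radii equal to half their separation, then
delegate to TraceEllipse() over the requested angular range in
degrees.
*/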
PointInfo
center,
radius;
center.x=0.5*(end.x+start.x);
center.y=0.5*(end.y+start.y);
radius.x=fabs(center.x-start.x);
radius.y=fabs(center.y-start.y);
return(TraceEllipse(mvg_info,center,radius,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
MagickStatusType
status;
PointInfo
center,
points[3],
radii;
register double
cosine,
sine;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
size_t
arc_segments;
ssize_t
offset;
offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
return(TracePoint(primitive_info,end));
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
return(TraceLine(primitive_info,start,end));
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
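/*
This follows the endpoint-to-center arc conversion of the SVG path
specification: the half-difference of the endpoints is rotated into
the ellipse's frame, and delta measures whether the given radii can
span the endpoints; when delta > 1 the radii are scaled up by
sqrt(delta) below, as the specification prescribes.
*/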
if (delta < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
MagickEpsilon))));
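/*
Split the sweep into segments of at most a quarter turn and
approximate each with a cubic Bezier below: gamma, written as
(8/3)*sin(beta/2)^2/sin(beta), simplifies to (4/3)*tan(beta/2), and
since beta is half the per-segment sweep this is the classic
(4/3)*tan(theta/4) control-point distance for a circular arc of
sweep theta.
*/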
status=MagickTrue;
p=primitive_info;
for (i=0; i < (ssize_t) arc_segments; i++)
{
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
status&=TraceBezier(mvg_info,4);
if (status == 0)
break;
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
p+=p->coordinates;
}
if (status == 0)
return(MagickFalse);
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=number_coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
coefficients=(double *) AcquireQuantumMemory(number_coordinates,
sizeof(*coefficients));
points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
{
if (points != (PointInfo *) NULL)
points=(PointInfo *) RelinquishMagickMemory(points);
if (coefficients != (double *) NULL)
coefficients=(double *) RelinquishMagickMemory(coefficients);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
control_points=quantum*number_coordinates;
if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
/*
Compute bezier points.
*/
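/*
The loop below evaluates the curve in the Bernstein basis:
coefficients[j] holds C(n-1,j), alpha starts at (1-t)^(n-1), and each
multiplication by t/(1-t) advances it, so alpha*coefficients[j]
equals C(n-1,j)*t^j*(1-t)^(n-1-j) at parameter t=weight.
*/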
end=primitive_info[number_coordinates-1].point;
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
weight=0.0;
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bezier curves are just short segmented polys.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
if (TracePoint(p,points[i]) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
}
if (TracePoint(p,end) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end)
{
double
alpha,
beta,
radius;
PointInfo
offset,
degrees;
alpha=end.x-start.x;
beta=end.y-start.y;
radius=hypot((double) alpha,(double) beta);
offset.x=(double) radius;
offset.y=(double) radius;
degrees.x=0.0;
degrees.y=360.0;
return(TraceEllipse(mvg_info,start,offset,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
const PointInfo radii,const PointInfo arc)
{
double
coordinates,
delta,
step,
x,
y;
PointInfo
angle,
point;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(MagickTrue);
delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
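/*
With delta=2/max(rx,ry), small ellipses keep the default pi/8 angular
step while larger ones shrink it to delta/2, about 1/max(rx,ry)
radians, so chord lengths stay near a pixel.
*/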
angle.x=DegreesToRadians(arc.x);
y=arc.y;
while (y < arc.x)
y+=360.0;
angle.y=DegreesToRadians(y);
coordinates=ceil((angle.y-angle.x)/step+1.0);
if (coordinates > (108.0*BezierQuantum))
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
return(MagickFalse);
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
x=fabs(primitive_info[0].point.x-
primitive_info[primitive_info->coordinates-1].point.x);
y=fabs(primitive_info[0].point.y-
primitive_info[primitive_info->coordinates-1].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
if (TracePoint(primitive_info,start) == MagickFalse)
return(MagickFalse);
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
{
primitive_info->primitive=PointPrimitive;
primitive_info->coordinates=1;
return(MagickTrue);
}
if (TracePoint(primitive_info+1,end) == MagickFalse)
return(MagickFalse);
(primitive_info+1)->primitive=primitive_info->primitive;
primitive_info->coordinates=2;
primitive_info->closed_subpath=MagickFalse;
return(MagickTrue);
}
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
ExceptionInfo *exception)
{
char
*next_token,
token[MagickPathExtent];
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
MagickBooleanType
status;
PointInfo
end = {0.0, 0.0},
points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
point = {0.0, 0.0},
start = {0.0, 0.0};
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register PrimitiveInfo
*q;
register ssize_t
i;
size_t
number_coordinates,
z_count;
ssize_t
subpath_offset;
subpath_offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
status=MagickTrue;
attribute=0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
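/*
Parse an SVG-style path string: uppercase commands take absolute
coordinates, lowercase ones are relative to the current point; S and
T reflect the previous control point for smooth curve joins, and Z
closes the subpath back to its starting point.
*/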
for (p=path; *p != '\0'; )
{
if (status == MagickFalse)
break;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle = 0.0;
MagickBooleanType
large_arc = MagickFalse,
sweep = MagickFalse;
PointInfo
arc = {0.0, 0.0};
/*
Elliptical arc.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
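/*
Horizontal line to.
*/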
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
/*
Move to.
*/
if (mvg_info->offset != subpath_offset)
{
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
}
i=0;
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
if (i == 0)
start=point;
i++;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'q':
case 'Q':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Smooth cubic Bézier curve.
*/
do
{
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 't':
case 'T':
{
/*
Smooth quadratic Bézier curve.
*/
do
{
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (status == MagickFalse)
break;
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
/*
Vertical line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
/*
Close path.
*/
point=start;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
primitive_info->closed_subpath=MagickTrue;
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
z_count++;
break;
}
default:
{
ThrowPointExpectedException(token,exception);
break;
}
}
}
if (status == MagickFalse)
return(-1);
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return((ssize_t) number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
PointInfo
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=start.x;
point.y=end.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,end) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=end.x;
point.y=start.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
point,
segment;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
ssize_t
offset;
offset=mvg_info->offset;
segment.x=fabs(end.x-start.x);
segment.y=fabs(end.y-start.y);
if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
{
(*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
return(MagickTrue);
}
if (arc.x > (0.5*segment.x))
arc.x=0.5*segment.x;
if (arc.y > (0.5*segment.y))
arc.y=0.5*segment.y;
point.x=start.x+segment.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+segment.x-arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
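/*
TraceSquareLinecap() extends the first and last vertices of an open
stroke outward by `offset' along their segment directions so the line
ends are squared off beyond the path endpoints.
*/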
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
const size_t number_vertices,const double offset)
{
double
distance;
register double
dx,
dy;
register ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
return(MagickTrue);
}
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
#define MaxStrokePad (6*BezierQuantum+360)
#define CheckPathExtent(pad_p,pad_q) \
{ \
if ((pad_p) > MaxBezierCoordinates) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
{ \
if (~extent_p < (pad_p)) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
{ \
extent_p+=(pad_p); \
stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
MaxStrokePad,sizeof(*stroke_p)); \
} \
} \
if ((pad_q) > MaxBezierCoordinates) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
{ \
if (~extent_q < (pad_q)) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
{ \
extent_q+=(pad_q); \
stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
MaxStrokePad,sizeof(*stroke_q)); \
} \
} \
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
{ \
if (stroke_p != (PointInfo *) NULL) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
if (stroke_q != (PointInfo *) NULL) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
polygon_primitive=(PrimitiveInfo *) \
RelinquishMagickMemory(polygon_primitive); \
(void) ThrowMagickException(exception,GetMagickModule(), \
ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \
return((PrimitiveInfo *) NULL); \
} \
}
typedef struct _StrokeSegment
{
double
p,
q;
} StrokeSegment;
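/*
Build the stroke outline by offsetting each polyline segment by half
the stroke width on both sides (stroke_p and stroke_q), resolving
every interior vertex with the requested line join (bevel, miter, or
round), and finally concatenating the p side forward with the q side
reversed into one closed polygon.
*/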
double
delta_theta,
dot_product,
mid,
miterlimit;
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*stroke_p,
*stroke_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
extent_p,
extent_q,
number_vertices;
ssize_t
j,
n,
p,
q;
StrokeSegment
dx = {0.0, 0.0},
dy = {0.0, 0.0},
inverse_slope = {0.0, 0.0},
slope = {0.0, 0.0},
theta = {0.0, 0.0};
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if (polygon_primitive == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PrimitiveInfo *) NULL);
}
(void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
sizeof(*polygon_primitive));
offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
closed_path=(fabs(offset.x) < MagickEpsilon) &&
(fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
{
if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
{
/*
Zero length subpath.
*/
stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
sizeof(*stroke_polygon));
stroke_polygon[0]=polygon_primitive[0];
stroke_polygon[0].coordinates=0;
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
n=(ssize_t) number_vertices-1L;
}
extent_p=2*number_vertices;
extent_q=2*number_vertices;
stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
sizeof(*stroke_p));
stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
sizeof(*stroke_q));
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
{
if (stroke_p != (PointInfo *) NULL)
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
if (stroke_q != (PointInfo *) NULL)
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PrimitiveInfo *) NULL);
}
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < MagickEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.p) < MagickEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
(void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
stroke_p[p++]=box_p[0];
stroke_q[q++]=box_q[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < MagickEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.q) < MagickEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
CheckPathExtent(MaxStrokePad,MaxStrokePad);
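/*
dx.q*dy.p-dx.p*dy.q is the z component of the cross product of the
previous and current direction vectors; its sign tells which way the
path turns and selects the matching join construction below.
*/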
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
stroke_q[q].x=box_q[1].x;
stroke_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
stroke_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
stroke_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
stroke_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
stroke_p[p++]=box_p[1];
stroke_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
|
pr29947-2.c | /* PR libgomp/29947 */
/* { dg-options "-O2 -fopenmp" } */
/* { dg-do run } */
extern void abort (void);
int cnt;
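/* Each testN below runs a static-scheduled (optionally chunked and/or
   ordered) loop over [j1, k1] and uses the e and c reductions to check
   that exactly max(0, k2 - j2 + 1) iterations ran, all of them inside
   [j2, k2]; cnt is bumped once per thread in each explicit parallel
   region. */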
void
test1 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
{
#pragma omp for schedule (static)
for (i = j1; i <= k1; ++i)
{
if (i < j2 || i > k2)
++e;
++c;
}
#pragma omp atomic
++cnt;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test2 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
{
#pragma omp for schedule (static)
for (i = k1; i >= j1; --i)
{
if (i < j2 || i > k2)
++e;
++c;
}
#pragma omp atomic
++cnt;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test3 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
{
#pragma omp for schedule (static, 1)
for (i = j1; i <= k1; ++i)
{
if (i < j2 || i > k2)
++e;
++c;
}
#pragma omp atomic
++cnt;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test4 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
{
#pragma omp for schedule (static, 1)
for (i = k1; i >= j1; --i)
{
if (i < j2 || i > k2)
++e;
++c;
}
#pragma omp atomic
++cnt;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test5 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
{
#pragma omp for schedule (static) ordered
for (i = j1; i <= k1; ++i)
{
if (i < j2 || i > k2)
++e;
#pragma omp ordered
++c;
}
#pragma omp atomic
++cnt;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test6 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
{
#pragma omp for schedule (static) ordered
for (i = k1; i >= j1; --i)
{
if (i < j2 || i > k2)
++e;
#pragma omp ordered
++c;
}
#pragma omp atomic
++cnt;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test7 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
{
#pragma omp for schedule (static, 1) ordered
for (i = j1; i <= k1; ++i)
{
if (i < j2 || i > k2)
++e;
#pragma omp ordered
++c;
}
#pragma omp atomic
++cnt;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test8 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
{
#pragma omp for schedule (static, 1) ordered
for (i = k1; i >= j1; --i)
{
if (i < j2 || i > k2)
++e;
#pragma omp ordered
++c;
}
#pragma omp atomic
++cnt;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test9 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static)
for (i = j1; i <= k1; ++i)
{
if (i < j2 || i > k2)
++e;
++c;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test10 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static)
for (i = k1; i >= j1; --i)
{
if (i < j2 || i > k2)
++e;
++c;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test11 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static, 1)
for (i = j1; i <= k1; ++i)
{
if (i < j2 || i > k2)
++e;
++c;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test12 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static, 1)
for (i = k1; i >= j1; --i)
{
if (i < j2 || i > k2)
++e;
++c;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test13 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static) ordered
for (i = j1; i <= k1; ++i)
{
if (i < j2 || i > k2)
++e;
#pragma omp ordered
++c;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test14 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static) ordered
for (i = k1; i >= j1; --i)
{
if (i < j2 || i > k2)
++e;
#pragma omp ordered
++c;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test15 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered
for (i = j1; i <= k1; ++i)
{
if (i < j2 || i > k2)
++e;
#pragma omp ordered
++c;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
void
test16 (long j1, long k1, long j2, long k2)
{
long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered
for (i = k1; i >= j1; --i)
{
if (i < j2 || i > k2)
++e;
#pragma omp ordered
++c;
}
if (e || (c != (j2 > k2 ? 0 : k2 - j2 + 1)))
abort ();
}
int
__attribute__((noinline))
test (long j1, long k1, long j2, long k2)
{
test1 (j1, k1, j2, k2);
test2 (j1, k1, j2, k2);
test3 (j1, k1, j2, k2);
test4 (j1, k1, j2, k2);
test5 (j1, k1, j2, k2);
test6 (j1, k1, j2, k2);
test7 (j1, k1, j2, k2);
test8 (j1, k1, j2, k2);
test9 (j1, k1, j2, k2);
test10 (j1, k1, j2, k2);
test11 (j1, k1, j2, k2);
test12 (j1, k1, j2, k2);
test13 (j1, k1, j2, k2);
test14 (j1, k1, j2, k2);
test15 (j1, k1, j2, k2);
test16 (j1, k1, j2, k2);
return cnt;
}
int
main (void)
{
test (1, 5, 1, 5);
test (5, 5, 5, 5);
test (5, 4, 5, 4);
test (5, 1, 5, 1);
return 0;
}
|
declare-variant-1.c | int foo (int, int, int *);
int bar (int, int, int *);
#pragma omp declare variant (foo) \
match (construct={parallel,for},\
device={isa(avx512f,avx512vl),kind(host,cpu)},\
implementation={vendor(score(0):gnu),unified_shared_memory},\
user={condition(score(0):0)})
#pragma omp declare variant (bar) \
match (device={arch(x86_64,powerpc64),isa(avx512f,popcntb)}, \
implementation={atomic_default_mem_order(seq_cst),made_up_selector("foo", 13, "bar")}, \
user={condition(3-3)})
int baz (int, int, int *);
int
qux (void)
{
int i = 3;
return baz (1, 2, &i);
}
int quux (int);
void
corge (void)
{
int i;
#pragma omp declare variant (quux) match (construct={parallel,for})
extern int waldo (int);
waldo (5);
#pragma omp parallel for
for (i = 0; i < 3; i++)
waldo (6);
#pragma omp parallel
#pragma omp taskgroup
#pragma omp for
for (i = 0; i < 3; i++)
waldo (7);
#pragma omp parallel
#pragma omp master
waldo (8);
}
#pragma omp declare variant (bar) match \
(implementation={atomic_default_mem_order(relaxed), \
unified_address, unified_shared_memory, \
dynamic_allocators, reverse_offload})
int baz2 (int x, int y, int *z)
{
return x + y + *z;
}
#pragma omp declare variant (bar) match \
(implementation={atomic_default_mem_order(score(3): acq_rel)})
int baz3 (int, int, int *);
|
pairwise_transform.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/*
* pairwise_transform.h
*
* Created on: Dec 28, 2015
* Author: agibsonccc
*/
#ifndef PAIRWISE_TRANSFORM_H_
#define PAIRWISE_TRANSFORM_H_
#ifdef _OPENMP
#include <omp.h>
#endif
#include <templatemath.h>
#include <helper_cuda.h>
#include <helpers/shape.h>
#include <pairwise_util.h>
#include <dll.h>
#include <stdio.h>
#include <ops/ops.h>
#include <op_boilerplate.h>
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#endif
#ifndef _OPENMP
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#endif
#include "legacy_ops.h"
namespace functions {
namespace pairwise_transforms {
/**
* Element-wise transforms involving 2 arrays (result = op(x, y))
*/
template<typename T>
class PairWiseTransform {
public:
#ifdef __CUDACC__
static __host__ void execudaCudaStrided(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, T *dx, Nd4jLong xStride, T *y, Nd4jLong yStride, T *result, Nd4jLong resultStride, T *extraParams, Nd4jLong n);
static __host__ void execudaCudaShaped(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, T *dx, Nd4jLong *xShapeInfo, T *y, Nd4jLong *yShapeInfo, T *result, Nd4jLong *resultShapeInfo, T *extraParams);
static __device__ void transformCuda(const int opNum, Nd4jLong n, T *dx, T *y, Nd4jLong incx, Nd4jLong incy, T *extraParams, T *result, Nd4jLong incz, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo);
static __device__ void transformCuda(const int opNum, T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo);
static __device__ void transformCuda(const int opNum, T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *indexes, Nd4jLong *yIndexes, Nd4jLong *resultIndexes, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo);
template<typename OpType>
static __device__ void transformCuda(T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo);
template<typename OpType>
static __device__ void transformCuda(Nd4jLong n, T *dx, T *dy, Nd4jLong incx, Nd4jLong incy, T *params, T *result, Nd4jLong incz, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo);
template<typename OpType>
static __device__ void transform(T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *indexes, Nd4jLong *yIndexes, Nd4jLong *resultIndexes, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo);
#endif
public:
static void exec(
const int opNum,
T *dx,
Nd4jLong *xShapeBuffer,
T *y,
Nd4jLong *yShapeBuffer,
T *result,
Nd4jLong *resultShapeBuffer,
T *extraParams,
Nd4jLong *indexes,
Nd4jLong *yIndexes,
Nd4jLong *resultIndexes) {
DISPATCH_BY_OPNUM(exec, PARAMS(dx,
xShapeBuffer,
y,
yShapeBuffer,
result, resultShapeBuffer,
extraParams,
indexes,
yIndexes,
resultIndexes), PAIRWISE_TRANSFORM_OPS);
}
static void exec(
const int opNum,
T *dx,
Nd4jLong *xShapeBuffer,
T *y,
Nd4jLong *yShapeBuffer,
T *result,
Nd4jLong *resultShapeBuffer,
T *extraParams) {
DISPATCH_BY_OPNUM(exec, PARAMS(dx,
xShapeBuffer,
y,
yShapeBuffer,
result,
resultShapeBuffer,
extraParams),
PAIRWISE_TRANSFORM_OPS);
}
static void exec(
const int opNum,
T *dx,
Nd4jLong xStride,
T *y,
Nd4jLong yStride,
T *result,
Nd4jLong resultStride,
T *extraParams,
Nd4jLong n) {
DISPATCH_BY_OPNUM(exec, PARAMS(dx,
xStride,
y,
yStride,
result,
resultStride,
extraParams,
n), PAIRWISE_TRANSFORM_OPS);
}
template<typename OpType>
static void exec(
T *dx,
Nd4jLong* xShapeBuffer,
T *y,
Nd4jLong* yShapeBuffer,
T *result,
Nd4jLong* resultShapeBuffer,
T *extraParams,
Nd4jLong *indexes,
Nd4jLong *yIndexes,
Nd4jLong *resultIndexes) {
Nd4jLong n = shape::length(xShapeBuffer);
#pragma omp parallel for simd schedule(guided) proc_bind(AFFINITY) default(shared)
for (Nd4jLong i = 0; i < n; i++) {
result[resultIndexes[i]] = OpType::op(dx[indexes[i]], y[yIndexes[i]], extraParams);
}
}
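// The indexed overload above is a gather/scatter form: element i reads
// dx[indexes[i]] and y[yIndexes[i]] and writes result[resultIndexes[i]]
// in a guided-schedule parallel simd loop.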
template<typename OpType>
static void exec(
T *dx,
Nd4jLong* xShapeBuffer,
T *y,
Nd4jLong* yShapeBuffer,
T *result,
Nd4jLong* resultShapeBuffer,
T *extraParams) {
auto n = shape::length(xShapeBuffer);
auto xElementWiseStride = shape::elementWiseStride(xShapeBuffer);
auto yElementWiseStride = shape::elementWiseStride(yShapeBuffer);
auto resultElementWiseStride = shape::elementWiseStride(resultShapeBuffer);
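// Dispatch strategy for the shaped exec: broadcast when y is a scalar;
// take the flat strided fast path when x, y and result agree in order
// and element-wise stride; walk same-shape arrays with raw-array
// iterators over the outermost dimension; otherwise fall back to
// coordinate (ind2subC) indexing.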
if (shape::isScalar(yShapeBuffer)) {
if (xElementWiseStride == 1 && resultElementWiseStride == 1) {
for (Nd4jLong e = 0; e < n; e++) {
result[e] = OpType::op(dx[e], y[0], extraParams);
}
} else {
Nd4jLong xCoord[MAX_RANK];
Nd4jLong resultCoord[MAX_RANK];
int xRank = shape::rank(xShapeBuffer);
int resultRank = shape::rank(resultShapeBuffer);
Nd4jLong *xShape = shape::shapeOf(xShapeBuffer);
Nd4jLong *xStride = shape::stride(xShapeBuffer);
Nd4jLong *resultShape = shape::shapeOf(resultShapeBuffer);
Nd4jLong *resultStride = shape::stride(resultShapeBuffer);
int elementsPerThread = n / ELEMENT_THRESHOLD;
int num_threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());
#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) private(xCoord, resultCoord)
for (Nd4jLong i = 0; i < n; i++) {
shape::ind2subC(xRank,xShape, i, xCoord);
shape::ind2subC(resultRank,resultShape, i, resultCoord);
Nd4jLong xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
Nd4jLong resultOffset = shape::getOffset(0, resultShape, resultStride, resultCoord, resultRank);
result[resultOffset] = OpType::op(dx[xOffset], y[0], extraParams);
}
}
return;
}
bool sameShape = shape::shapeEquals(shape::rank(xShapeBuffer), shape::shapeOf(xShapeBuffer),
shape::rank(yShapeBuffer), shape::shapeOf(yShapeBuffer));
if (xElementWiseStride >= 1 &&
yElementWiseStride >= 1 &&
resultElementWiseStride >= 1 &&
shape::order(xShapeBuffer) == shape::order(yShapeBuffer) &&
shape::order(resultShapeBuffer) == shape::order(xShapeBuffer) &&
sameShape && xElementWiseStride == yElementWiseStride) {
exec<OpType>(dx,
xElementWiseStride,
y,
yElementWiseStride,
result,
resultElementWiseStride,
extraParams,
n);
}
//not same shape
else if (!sameShape && shape::order(xShapeBuffer) == shape::order(yShapeBuffer) &&
shape::order(resultShapeBuffer) == shape::order(xShapeBuffer) && xElementWiseStride >= 1 &&
yElementWiseStride >= 1 &&
resultElementWiseStride >= 1 && xElementWiseStride == yElementWiseStride) {
exec<OpType>(dx,
xElementWiseStride,
y,
yElementWiseStride,
result,
resultElementWiseStride,
extraParams,
shape::length(yShapeBuffer));
}
else if (sameShape) {
int rank = shape::rank(xShapeBuffer);
Nd4jLong *xShape = shape::shapeOf(xShapeBuffer);
Nd4jLong *xStride = shape::stride(xShapeBuffer);
Nd4jLong *yStride = shape::stride(yShapeBuffer);
Nd4jLong *resultStride = shape::stride(resultShapeBuffer);
// process one slice along the outermost dimension (a TAD) per iteration
int tadsPerThread = xShape[0] / TAD_THRESHOLD;
int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());
#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads>1) proc_bind(AFFINITY) default(shared)
for (Nd4jLong i = 0; i < xShape[0]; i++) {
T *dxLocal = dx + xStride[0] * i;
T *yLocal = y + yStride[0] * i;
T *resultLocal = result + resultStride[0] * i;
int rankLocal = rank - 1;
Nd4jLong *xShapeLocal = xShape + 1;
Nd4jLong *xStrideLocal = xStride + 1;
Nd4jLong *yStrideLocal = yStride + 1;
Nd4jLong *resultStrideLocal = resultStride + 1;
Nd4jLong shapeIter[MAX_RANK];
Nd4jLong coord[MAX_RANK];
int dim;
Nd4jLong xStridesIter[MAX_RANK];
Nd4jLong yStridesIter[MAX_RANK];
Nd4jLong resultStridesIter[MAX_RANK];
if (PrepareThreeRawArrayIter<T>(rankLocal,
xShapeLocal,
dxLocal,
xStrideLocal,
yLocal,
yStrideLocal,
resultLocal,
resultStrideLocal,
rankLocal,
shapeIter,
&dxLocal,
xStridesIter,
&yLocal,
yStridesIter,
&resultLocal,
resultStridesIter) >= 0) {
ND4J_RAW_ITER_START(dim, rankLocal, coord, shapeIter); {
// Process the innermost dimension
T *xIter = dxLocal;
T *yIter = yLocal;
T *resultIter = resultLocal;
resultIter[0] = OpType::op(xIter[0], yIter[0], extraParams);
}
ND4J_RAW_ITER_THREE_NEXT(dim,
rankLocal,
coord,
shapeIter,
dxLocal,
xStridesIter,
yLocal,
yStridesIter,
resultLocal,
resultStridesIter);
}
else {
printf("Unable to prepare array\n");
}
}
}
else {
Nd4jLong len = n;
int xRank = shape::rank(xShapeBuffer);
int yRank = shape::rank(yShapeBuffer);
int resultRank = shape::rank(resultShapeBuffer);
Nd4jLong *xShape = shape::shapeOf(xShapeBuffer);
Nd4jLong *xStride = shape::stride(xShapeBuffer);
Nd4jLong *yShape = shape::shapeOf(yShapeBuffer);
Nd4jLong *yStride = shape::stride(yShapeBuffer);
Nd4jLong *resultShape = shape::shapeOf(resultShapeBuffer);
Nd4jLong *resultStride = shape::stride(resultShapeBuffer);
int elementsPerThread = n / ELEMENT_THRESHOLD;
int num_threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());
Nd4jLong xCoord[MAX_RANK];
Nd4jLong yCoord[MAX_RANK];
if(dx == result) {
#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) private(xCoord, yCoord)
for (Nd4jLong i = 0; i < len; i++) {
shape::ind2subC(xRank,xShape, i, xCoord);
shape::ind2subC(yRank,yShape, i, yCoord);
Nd4jLong xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
Nd4jLong yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank);
result[xOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams);
}
}
else {
Nd4jLong resultCoord[MAX_RANK];
#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) private(xCoord, yCoord, resultCoord)
for (Nd4jLong i = 0; i < len; i++) {
shape::ind2subC(xRank,xShape, i, xCoord);
shape::ind2subC(yRank,yShape, i, yCoord);
shape::ind2subC(resultRank,resultShape, i, resultCoord);
Nd4jLong xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
Nd4jLong yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank);
Nd4jLong resultOffset = shape::getOffset(0, resultShape, resultStride, resultCoord, resultRank);
result[resultOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams);
}
}
}
}
template<typename OpType>
static void exec(T *dx,
Nd4jLong xStride,
T *y,
Nd4jLong yStride,
T *result,
Nd4jLong resultStride,
T *extraParams,
const Nd4jLong n) {
int elementsPerThread = n / ELEMENT_THRESHOLD;
int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
_threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());
int span = (n / _threads) + 8;
if (xStride == 1 && yStride == 1 && resultStride == 1) {
if (_threads > 1) {
#pragma omp parallel num_threads(_threads) if (_threads>1) proc_bind(AFFINITY) default(shared)
{
Nd4jLong tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > n) end = n;
#pragma omp simd
for (Nd4jLong i = start; i < end; i++) {
result[i] = OpType::op(dx[i], y[i], extraParams);
}
}
} else {
#pragma omp simd
for (Nd4jLong i = 0; i < n; i++) {
result[i] = OpType::op(dx[i], y[i], extraParams);
}
}
}
else {
if (_threads > 1) {
#pragma omp parallel num_threads(_threads) if (_threads>1) proc_bind(AFFINITY) default(shared)
{
Nd4jLong tid = omp_get_thread_num();
Nd4jLong start = span * tid;
Nd4jLong end = span * (tid + 1);
if (end > n) end = n;
#pragma omp simd
for (Nd4jLong i = start; i < end; i++) {
result[i * resultStride] = OpType::op(dx[i * xStride], y[i * yStride], extraParams);
}
}
} else {
#pragma omp simd
for (Nd4jLong i = 0; i < n; i++) {
result[i * resultStride] = OpType::op(dx[i * xStride], y[i * yStride], extraParams);
}
}
}
}
};
}
}
#endif /* PAIRWISE_TRANSFORM_H_ */
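/*
 * A minimal, self-contained sketch of the span-based work partitioning used
 * by the strided exec above: each thread gets a contiguous span of roughly
 * n / num_threads elements (padded by a few slots) and every span is clamped
 * to n. The names (pairwise_add, main) are illustrative only, not part of
 * the library above. Compile with -fopenmp.
 */
#include <omp.h>
#include <stdio.h>
static void pairwise_add(const double *x, const double *y, double *z, long n) {
    int threads = omp_get_max_threads();
    long span = n / threads + 8;   /* small pad so the spans cover n after rounding */
    #pragma omp parallel num_threads(threads)
    {
        long tid = omp_get_thread_num();
        long start = span * tid;
        long end = span * (tid + 1);
        if (end > n) end = n;
        for (long i = start; i < end; i++)
            z[i] = x[i] + y[i];
    }
}
int main(void) {
    double x[16], y[16], z[16];
    for (int i = 0; i < 16; i++) { x[i] = i; y[i] = 2 * i; }
    pairwise_add(x, y, z, 16);
    printf("z[15] = %g\n", z[15]); /* expected 45 */
    return 0;
}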
|
hello.c | #include <stdio.h>
#ifdef _OPENACC
#include <openacc.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#define N 1000
int main() {
int a[N];
int b[N];
#ifdef _OPENACC
acc_init(acc_device_not_host);
printf(" Compiling with OpenACC support \n");
#endif
printf(" Hello World! \n ");
// Compute on the host
for (int i = 0; i < N; i++) {
a[i] = i;
}
// Compute on the GPU if OpenACC/OpenMP support - host if not
#ifdef _OPENACC
#pragma acc kernels copy(b[0:N])
#endif
#ifdef _OPENMP
#pragma omp target map(from:b[0:N])
#endif
for (int i = 0; i < N; i++) {
b[i] = i;
}
for (int i = 0; i < N; i++) {
if (a[i] != b[i]) {
printf("Something went wrong\n");
return 1;
}
}
#ifdef _OPENACC
acc_shutdown(acc_device_not_host);
#endif
return 0;
}
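/*
 * Note (an aside, not part of the original example): "#pragma omp target" by
 * itself only offloads the region; the loop body still runs on a single
 * device thread. To also parallelize the loop on the device, a combined
 * construct such as the following (OpenMP 4.5+) is the usual choice:
 *
 * #ifdef _OPENMP
 * #pragma omp target teams distribute parallel for map(from:b[0:N])
 * #endif
 * for (int i = 0; i < N; i++)
 *     b[i] = i;
 */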
|
2.hello.c | #include <stdio.h>
#include <omp.h>
/* If the OMP_NUM_THREADS variable is set to 8 with */
/* export OMP_NUM_THREADS=8 */
/* Q1: Is the execution of the program correct? Add a */
/* data sharing clause to make it correct */
/* Q2: Are the lines always printed in the same order? */
/* Could the messages appear intermixed? */
int main ()
{
int id;
#pragma omp parallel
{
#pragma omp critical
{
id = omp_get_thread_num();
printf("(%d) Hello ",id);
printf("(%d) world!\n",id);
}
}
return 0;
}
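/*
 * One possible answer sketch for the questions above (an illustration, not
 * the only solution): with the critical section the program is correct but
 * fully serialized; if "id" is made private (as below), it stays correct
 * even without the critical section, since each thread then has its own
 * copy (Q1). The per-thread lines can appear in any order, and once the
 * critical section is removed the two printf calls of one thread may be
 * interleaved with another thread's output (Q2).
 */
#include <stdio.h>
#include <omp.h>
int main(void)
{
    int id;
    #pragma omp parallel private(id)
    {
        id = omp_get_thread_num();
        printf("(%d) Hello ", id);
        printf("(%d) world!\n", id);
    }
    return 0;
}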
|
cpu.c | /*
* Copyright 2012 INRIA Paris-Rocquencourt
* Copyright 2012 Ecole Normale Superieure
*
* Use of this software is governed by the MIT license
*
* Written by Tobias Grosser, INRIA Paris-Rocquencourt,
* Domaine de Voluceau, Rocquencourt, B.P. 105,
* 78153 Le Chesnay Cedex France
* and Sven Verdoolaege,
* Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
*/
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <isl/aff.h>
#include <isl/ctx.h>
#include <isl/flow.h>
#include <isl/map.h>
#include <isl/ast_build.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <pet.h>
#include "ppcg.h"
#include "ppcg_options.h"
#include "cpu.h"
#include "print.h"
#include "schedule.h"
#include "util.h"
/* Representation of a statement inside a generated AST.
*
* "stmt" refers to the original statement.
* "ref2expr" maps the reference identifier of each access in
* the statement to an AST expression that should be printed
* at the place of the access.
*/
struct ppcg_stmt {
struct pet_stmt *stmt;
isl_id_to_ast_expr *ref2expr;
};
static void ppcg_stmt_free(void *user)
{
struct ppcg_stmt *stmt = user;
if (!stmt)
return;
isl_id_to_ast_expr_free(stmt->ref2expr);
free(stmt);
}
/* Derive the output file name from the input file name.
* 'input' is the entire path of the input file; the output
* is its base name with a '.ppcg' marker inserted before the
* original extension, so 'file.c' becomes 'file.ppcg.c'.
* If the input has no extension, '.c' is appended.
*/
static FILE *get_output_file(const char *input, const char *output)
{
char name[PATH_MAX];
const char *ext;
const char ppcg_marker[] = ".ppcg";
int len;
FILE *file;
len = ppcg_extract_base_name(name, input);
strcpy(name + len, ppcg_marker);
ext = strrchr(input, '.');
strcpy(name + len + sizeof(ppcg_marker) - 1, ext ? ext : ".c");
if (!output)
output = name;
file = fopen(output, "w");
if (!file) {
fprintf(stderr, "Unable to open '%s' for writing\n", output);
return NULL;
}
return file;
}
/* Data used to annotate 'for' nodes in the AST.
*/
struct ast_node_userinfo {
/* The for node is an openmp parallel for node. */
int is_openmp;
};
/* Information used while building the ast.
*/
struct ast_build_userinfo {
/* The current ppcg scop. */
struct ppcg_scop *scop;
/* Are we currently in a parallel for loop? */
int in_parallel_for;
};
/* Check if the current scheduling dimension is parallel.
*
* We check for parallelism by verifying that the loop does not carry any
* dependences.
* If the live_range_reordering option is set, then this currently
* includes the order dependences. In principle, non-zero order dependences
* could be allowed, but this would require privatization and/or expansion.
*
* Parallelism test: if the distance is zero in all outer dimensions, then it
* has to be zero in the current dimension as well.
* Implementation: first, translate dependences into time space, then force
* outer dimensions to be equal. If the distance is zero in the current
* dimension, then the loop is parallel.
* The distance is zero in the current dimension if it is a subset of a map
* with equal values for the current dimension.
*/
static int ast_schedule_dim_is_parallel(__isl_keep isl_ast_build *build,
struct ppcg_scop *scop)
{
isl_union_map *schedule, *deps;
isl_map *schedule_deps, *test;
isl_space *schedule_space;
unsigned i, dimension, is_parallel;
schedule = isl_ast_build_get_schedule(build);
schedule_space = isl_ast_build_get_schedule_space(build);
dimension = isl_space_dim(schedule_space, isl_dim_out) - 1;
deps = isl_union_map_copy(scop->dep_flow);
deps = isl_union_map_union(deps, isl_union_map_copy(scop->dep_false));
if (scop->options->live_range_reordering) {
isl_union_map *order = isl_union_map_copy(scop->dep_order);
deps = isl_union_map_union(deps, order);
}
deps = isl_union_map_apply_range(deps, isl_union_map_copy(schedule));
deps = isl_union_map_apply_domain(deps, schedule);
if (isl_union_map_is_empty(deps)) {
isl_union_map_free(deps);
isl_space_free(schedule_space);
return 1;
}
schedule_deps = isl_map_from_union_map(deps);
for (i = 0; i < dimension; i++)
schedule_deps = isl_map_equate(schedule_deps, isl_dim_out, i,
isl_dim_in, i);
test = isl_map_universe(isl_map_get_space(schedule_deps));
test = isl_map_equate(test, isl_dim_out, dimension, isl_dim_in,
dimension);
is_parallel = isl_map_is_subset(schedule_deps, test);
isl_space_free(schedule_space);
isl_map_free(test);
isl_map_free(schedule_deps);
return is_parallel;
}
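/* Informal illustration of the test above: for
* S(i,j): A[i][j] = A[i][j-1] + 1
* the flow dependence has distance (0,1) in schedule space. When testing the
* inner dimension, the outer dimensions are equated but the inner dimension
* still differs by one, so the subset test fails and the j-loop is not
* parallel; the i-loop has distance zero and passes the test.
*/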
/* Mark a for node openmp parallel, if it is the outermost parallel for node.
*/
static void mark_openmp_parallel(__isl_keep isl_ast_build *build,
struct ast_build_userinfo *build_info,
struct ast_node_userinfo *node_info)
{
if (build_info->in_parallel_for)
return;
if (ast_schedule_dim_is_parallel(build, build_info->scop)) {
build_info->in_parallel_for = 1;
node_info->is_openmp = 1;
}
}
/* Allocate an ast_node_info structure and initialize it with default values.
*/
static struct ast_node_userinfo *allocate_ast_node_userinfo()
{
struct ast_node_userinfo *node_info;
node_info = (struct ast_node_userinfo *)
malloc(sizeof(struct ast_node_userinfo));
node_info->is_openmp = 0;
return node_info;
}
/* Free an ast_node_info structure.
*/
static void free_ast_node_userinfo(void *ptr)
{
struct ast_node_userinfo *info;
info = (struct ast_node_userinfo *) ptr;
free(info);
}
/* This method is executed before the construction of a for node. It creates
* an isl_id that is used to annotate the subsequently generated AST 'for' node.
*
* In this function we also run the following analyses:
*
* - Detection of openmp parallel loops
*/
static __isl_give isl_id *ast_build_before_for(
__isl_keep isl_ast_build *build, void *user)
{
isl_id *id;
struct ast_build_userinfo *build_info;
struct ast_node_userinfo *node_info;
build_info = (struct ast_build_userinfo *) user;
node_info = allocate_ast_node_userinfo();
id = isl_id_alloc(isl_ast_build_get_ctx(build), "", node_info);
id = isl_id_set_free_user(id, free_ast_node_userinfo);
mark_openmp_parallel(build, build_info, node_info);
return id;
}
/* This method is executed after the construction of a for node.
*
* It performs the following actions:
*
* - Reset the 'in_parallel_for' flag as soon as we leave a 'for' node
* that is marked as OpenMP parallel.
*
*/
static __isl_give isl_ast_node *ast_build_after_for(
__isl_take isl_ast_node *node, __isl_keep isl_ast_build *build,
void *user)
{
isl_id *id;
struct ast_build_userinfo *build_info;
struct ast_node_userinfo *info;
id = isl_ast_node_get_annotation(node);
info = isl_id_get_user(id);
if (info && info->is_openmp) {
build_info = (struct ast_build_userinfo *) user;
build_info->in_parallel_for = 0;
}
isl_id_free(id);
return node;
}
/* Find the element in scop->stmts that has the given "id".
*/
static struct pet_stmt *find_stmt(struct ppcg_scop *scop, __isl_keep isl_id *id)
{
int i;
for (i = 0; i < scop->pet->n_stmt; ++i) {
struct pet_stmt *stmt = scop->pet->stmts[i];
isl_id *id_i;
id_i = isl_set_get_tuple_id(stmt->domain);
isl_id_free(id_i);
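/* The statement's domain still holds a reference that keeps id_i alive,
* and isl ids with the same name and user pointer are unique, so comparing
* the pointer after dropping our own reference is safe here. */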
if (id_i == id)
return stmt;
}
isl_die(isl_id_get_ctx(id), isl_error_internal,
"statement not found", return NULL);
}
/* Print a user statement in the generated AST.
* The ppcg_stmt has been attached to the node in at_each_domain.
*/
static __isl_give isl_printer *print_user(__isl_take isl_printer *p,
__isl_take isl_ast_print_options *print_options,
__isl_keep isl_ast_node *node, void *user)
{
struct ppcg_stmt *stmt;
isl_id *id;
id = isl_ast_node_get_annotation(node);
stmt = isl_id_get_user(id);
isl_id_free(id);
p = pet_stmt_print_body(stmt->stmt, p, stmt->ref2expr);
isl_ast_print_options_free(print_options);
return p;
}
/* Print a for loop node as an openmp parallel loop.
*
* To print an openmp parallel loop we print a normal for loop, but add
* "#pragma openmp parallel for" in front.
*
* Variables that are declared within the body of this for loop are
* automatically openmp 'private'. Iterators declared outside of the
* for loop are automatically openmp 'shared'. As ppcg declares all iterators
* at the position where they are assigned, there is no need to explicitly mark
* variables. Their automatically assigned type is already correct.
*
* This function only generates valid OpenMP code if the AST was generated
* with the 'atomic-bounds' option enabled.
*
*/
static __isl_give isl_printer *print_for_with_openmp(
__isl_keep isl_ast_node *node, __isl_take isl_printer *p,
__isl_take isl_ast_print_options *print_options)
{
p = isl_printer_start_line(p);
p = isl_printer_print_str(p, "#pragma omp parallel for");
p = isl_printer_end_line(p);
p = isl_ast_node_for_print(node, p, print_options);
return p;
}
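/* For instance, for a parallel band member the printer emits something like:
*
* #pragma omp parallel for
* for (int c0 = 0; c0 < n; c0 += 1)
* S_0(c0);
*/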
/* Print a for node.
*
* Depending on how the node is annotated, we either print a normal
* for node or an openmp parallel for node.
*/
static __isl_give isl_printer *print_for(__isl_take isl_printer *p,
__isl_take isl_ast_print_options *print_options,
__isl_keep isl_ast_node *node, void *user)
{
isl_id *id;
int openmp;
openmp = 0;
id = isl_ast_node_get_annotation(node);
if (id) {
struct ast_node_userinfo *info;
info = (struct ast_node_userinfo *) isl_id_get_user(id);
if (info && info->is_openmp)
openmp = 1;
}
if (openmp)
p = print_for_with_openmp(node, p, print_options);
else
p = isl_ast_node_for_print(node, p, print_options);
isl_id_free(id);
return p;
}
/* Index transformation callback for pet_stmt_build_ast_exprs.
*
* "index" expresses the array indices in terms of statement iterators
* "iterator_map" expresses the statement iterators in terms of
* AST loop iterators.
*
* The result expresses the array indices in terms of
* AST loop iterators.
*/
static __isl_give isl_multi_pw_aff *pullback_index(
__isl_take isl_multi_pw_aff *index, __isl_keep isl_id *id, void *user)
{
isl_pw_multi_aff *iterator_map = user;
iterator_map = isl_pw_multi_aff_copy(iterator_map);
return isl_multi_pw_aff_pullback_pw_multi_aff(index, iterator_map);
}
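/* Informally: if "index" maps S[i,j] -> A[i + j] and "iterator_map" maps the
* AST iterators [c0, c1] -> S[c0, c1], then the pullback yields
* [c0, c1] -> A[c0 + c1].
*/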
/* Transform the accesses in the statement associated to the domain
* called by "node" to refer to the AST loop iterators, construct
* corresponding AST expressions using "build",
* collect them in a ppcg_stmt and annotate the node with the ppcg_stmt.
*/
static __isl_give isl_ast_node *at_each_domain(__isl_take isl_ast_node *node,
__isl_keep isl_ast_build *build, void *user)
{
struct ppcg_scop *scop = user;
isl_ast_expr *expr, *arg;
isl_ctx *ctx;
isl_id *id;
isl_map *map;
isl_pw_multi_aff *iterator_map;
struct ppcg_stmt *stmt;
ctx = isl_ast_node_get_ctx(node);
stmt = isl_calloc_type(ctx, struct ppcg_stmt);
if (!stmt)
goto error;
expr = isl_ast_node_user_get_expr(node);
arg = isl_ast_expr_get_op_arg(expr, 0);
isl_ast_expr_free(expr);
id = isl_ast_expr_get_id(arg);
isl_ast_expr_free(arg);
stmt->stmt = find_stmt(scop, id);
isl_id_free(id);
if (!stmt->stmt)
goto error;
map = isl_map_from_union_map(isl_ast_build_get_schedule(build));
map = isl_map_reverse(map);
iterator_map = isl_pw_multi_aff_from_map(map);
stmt->ref2expr = pet_stmt_build_ast_exprs(stmt->stmt, build,
&pullback_index, iterator_map, NULL, NULL);
isl_pw_multi_aff_free(iterator_map);
id = isl_id_alloc(isl_ast_node_get_ctx(node), NULL, stmt);
id = isl_id_set_free_user(id, &ppcg_stmt_free);
return isl_ast_node_set_annotation(node, id);
error:
ppcg_stmt_free(stmt);
return isl_ast_node_free(node);
}
/* Set *depth (initialized to 0 by the caller) to the maximum
* of the schedule depths of the leaf nodes for which this function is called.
*/
static isl_bool update_depth(__isl_keep isl_schedule_node *node, void *user)
{
int *depth = user;
int node_depth;
if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
return isl_bool_true;
node_depth = isl_schedule_node_get_schedule_depth(node);
if (node_depth > *depth)
*depth = node_depth;
return isl_bool_false;
}
/* This function is called for each node in a CPU AST.
* In case of a user node, print the macro definitions required
* for printing the AST expressions in the annotation, if any.
* For other nodes, return true such that descendants are also
* visited.
*
* In particular, print the macro definitions needed for the substitutions
* of the original user statements.
*/
static isl_bool at_node(__isl_keep isl_ast_node *node, void *user)
{
struct ppcg_stmt *stmt;
isl_id *id;
isl_printer **p = user;
if (isl_ast_node_get_type(node) != isl_ast_node_user)
return isl_bool_true;
id = isl_ast_node_get_annotation(node);
stmt = isl_id_get_user(id);
isl_id_free(id);
if (!stmt)
return isl_bool_error;
*p = ppcg_print_body_macros(*p, stmt->ref2expr);
if (!*p)
return isl_bool_error;
return isl_bool_false;
}
/* Print the required macros for the CPU AST "node" to "p",
* including those needed for the user statements inside the AST.
*/
static __isl_give isl_printer *cpu_print_macros(__isl_take isl_printer *p,
__isl_keep isl_ast_node *node)
{
if (isl_ast_node_foreach_descendant_top_down(node, &at_node, &p) < 0)
return isl_printer_free(p);
p = ppcg_print_macros(p, node);
return p;
}
/* Code generate the scop 'scop' using "schedule"
* and print the corresponding C code to 'p'.
*/
static __isl_give isl_printer *print_scop(struct ppcg_scop *scop,
__isl_take isl_schedule *schedule, __isl_take isl_printer *p,
struct ppcg_options *options)
{
isl_ctx *ctx = isl_printer_get_ctx(p);
isl_ast_build *build;
isl_ast_print_options *print_options;
isl_ast_node *tree;
isl_id_list *iterators;
struct ast_build_userinfo build_info;
int depth;
depth = 0;
if (isl_schedule_foreach_schedule_node_top_down(schedule, &update_depth,
&depth) < 0)
goto error;
build = isl_ast_build_alloc(ctx);
iterators = ppcg_scop_generate_names(scop, depth, "c");
build = isl_ast_build_set_iterators(build, iterators);
build = isl_ast_build_set_at_each_domain(build, &at_each_domain, scop);
if (options->openmp) {
build_info.scop = scop;
build_info.in_parallel_for = 0;
build = isl_ast_build_set_before_each_for(build,
&ast_build_before_for,
&build_info);
build = isl_ast_build_set_after_each_for(build,
&ast_build_after_for,
&build_info);
}
tree = isl_ast_build_node_from_schedule(build, schedule);
isl_ast_build_free(build);
print_options = isl_ast_print_options_alloc(ctx);
print_options = isl_ast_print_options_set_print_user(print_options,
&print_user, NULL);
print_options = isl_ast_print_options_set_print_for(print_options,
&print_for, NULL);
p = cpu_print_macros(p, tree);
p = isl_ast_node_print(tree, p, print_options);
isl_ast_node_free(tree);
return p;
error:
isl_schedule_free(schedule);
isl_printer_free(p);
return NULL;
}
/* Tile the band node "node" with tile sizes "sizes" and
* mark all members of the resulting tile node as "atomic".
*/
static __isl_give isl_schedule_node *tile(__isl_take isl_schedule_node *node,
__isl_take isl_multi_val *sizes)
{
node = isl_schedule_node_band_tile(node, sizes);
node = ppcg_set_schedule_node_type(node, isl_ast_loop_atomic);
return node;
}
/* Tile "node", if it is a band node with at least 2 members.
* The tile sizes are set from the "tile_size" option.
*/
static __isl_give isl_schedule_node *tile_band(
__isl_take isl_schedule_node *node, void *user)
{
struct ppcg_scop *scop = user;
int n;
isl_space *space;
isl_multi_val *sizes;
if (isl_schedule_node_get_type(node) != isl_schedule_node_band)
return node;
n = isl_schedule_node_band_n_member(node);
if (n <= 1)
return node;
space = isl_schedule_node_band_get_space(node);
sizes = ppcg_multi_val_from_int(space, scop->options->tile_size);
return tile(node, sizes);
}
/* Construct schedule constraints from the dependences in ps
* for the purpose of computing a schedule for a CPU.
*
* The proximity constraints are set to the flow dependences.
*
* If live-range reordering is allowed then the conditional validity
* constraints are set to the order dependences with the flow dependences
* as condition. That is, a live-range (flow dependence) will be either
* local to an iteration of a band or all adjacent order dependences
* will be respected by the band.
* The validity constraints are set to the union of the flow dependences
* and the forced dependences, while the coincidence constraints
* are set to the union of the flow dependences, the forced dependences and
* the order dependences.
*
* If live-range reordering is not allowed, then both the validity
* and the coincidence constraints are set to the union of the flow
* dependences and the false dependences.
*
* Note that the coincidence constraints are only set when the "openmp"
* option is set. Even though the way openmp pragmas are introduced
* does not rely on the coincident property of the schedule band members,
* the coincidence constraints do affect the way the schedule is constructed,
* such that more schedule dimensions should be detected as parallel
* by ast_schedule_dim_is_parallel.
* Since the order dependences are also taken into account by
* ast_schedule_dim_is_parallel, they are also added to
* the coincidence constraints. If the openmp handling learns
* how to privatize some memory, then the corresponding order
* dependences can be removed from the coincidence constraints.
*/
static __isl_give isl_schedule_constraints *construct_cpu_schedule_constraints(
struct ppcg_scop *ps)
{
isl_schedule_constraints *sc;
isl_union_map *validity, *coincidence;
sc = isl_schedule_constraints_on_domain(isl_union_set_copy(ps->domain));
if (ps->options->live_range_reordering) {
sc = isl_schedule_constraints_set_conditional_validity(sc,
isl_union_map_copy(ps->tagged_dep_flow),
isl_union_map_copy(ps->tagged_dep_order));
validity = isl_union_map_copy(ps->dep_flow);
validity = isl_union_map_union(validity,
isl_union_map_copy(ps->dep_forced));
if (ps->options->openmp) {
coincidence = isl_union_map_copy(validity);
coincidence = isl_union_map_union(coincidence,
isl_union_map_copy(ps->dep_order));
}
} else {
validity = isl_union_map_copy(ps->dep_flow);
validity = isl_union_map_union(validity,
isl_union_map_copy(ps->dep_false));
if (ps->options->openmp)
coincidence = isl_union_map_copy(validity);
}
if (ps->options->openmp)
sc = isl_schedule_constraints_set_coincidence(sc, coincidence);
sc = isl_schedule_constraints_set_validity(sc, validity);
sc = isl_schedule_constraints_set_proximity(sc,
isl_union_map_copy(ps->dep_flow));
return sc;
}
/* Compute a schedule for the scop "ps".
*
* First derive the appropriate schedule constraints from the dependences
* in "ps" and then compute a schedule from those schedule constraints,
* possibly grouping statement instances based on the input schedule.
*/
static __isl_give isl_schedule *compute_cpu_schedule(struct ppcg_scop *ps)
{
isl_schedule_constraints *sc;
isl_schedule *schedule;
if (!ps)
return NULL;
sc = construct_cpu_schedule_constraints(ps);
if (ps->options->debug->dump_schedule_constraints)
isl_schedule_constraints_dump(sc);
schedule = ppcg_compute_schedule(sc, ps->schedule, ps->options);
return schedule;
}
/* Compute a new schedule for the scop "ps" if the reschedule option is set.
* Otherwise, return a copy of the original schedule.
*/
static __isl_give isl_schedule *optionally_compute_schedule(void *user)
{
struct ppcg_scop *ps = user;
if (!ps)
return NULL;
if (!ps->options->reschedule)
return isl_schedule_copy(ps->schedule);
return compute_cpu_schedule(ps);
}
/* Compute a schedule based on the dependences in "ps" and
* tile it if requested by the user.
*/
static __isl_give isl_schedule *get_schedule(struct ppcg_scop *ps,
struct ppcg_options *options)
{
isl_ctx *ctx;
isl_schedule *schedule;
if (!ps)
return NULL;
ctx = isl_union_set_get_ctx(ps->domain);
schedule = ppcg_get_schedule(ctx, options,
&optionally_compute_schedule, ps);
if (ps->options->tile)
schedule = isl_schedule_map_schedule_node_bottom_up(schedule,
&tile_band, ps);
return schedule;
}
/* Generate CPU code for the scop "ps" using "schedule" and
* print the corresponding C code to "p", including variable declarations.
*/
static __isl_give isl_printer *print_cpu_with_schedule(
__isl_take isl_printer *p, struct ppcg_scop *ps,
__isl_take isl_schedule *schedule, struct ppcg_options *options)
{
int hidden;
isl_set *context;
p = isl_printer_start_line(p);
p = isl_printer_print_str(p, "/* ppcg generated CPU code */");
p = isl_printer_end_line(p);
p = isl_printer_start_line(p);
p = isl_printer_end_line(p);
p = ppcg_set_macro_names(p);
p = ppcg_print_exposed_declarations(p, ps);
hidden = ppcg_scop_any_hidden_declarations(ps);
if (hidden) {
p = ppcg_start_block(p);
p = ppcg_print_hidden_declarations(p, ps);
}
context = isl_set_copy(ps->context);
context = isl_set_from_params(context);
schedule = isl_schedule_insert_context(schedule, context);
if (options->debug->dump_final_schedule)
isl_schedule_dump(schedule);
p = print_scop(ps, schedule, p, options);
if (hidden)
p = ppcg_end_block(p);
return p;
}
/* Generate CPU code for the scop "ps" and print the corresponding C code
* to "p", including variable declarations.
*/
__isl_give isl_printer *print_cpu(__isl_take isl_printer *p,
struct ppcg_scop *ps, struct ppcg_options *options)
{
isl_schedule *schedule;
schedule = isl_schedule_copy(ps->schedule);
return print_cpu_with_schedule(p, ps, schedule, options);
}
/* Generate CPU code for "scop" and print it to "p".
*
* First obtain a schedule for "scop" and then print code for "scop"
* using that schedule.
*/
static __isl_give isl_printer *generate(__isl_take isl_printer *p,
struct ppcg_scop *scop, struct ppcg_options *options)
{
isl_schedule *schedule;
schedule = get_schedule(scop, options);
return print_cpu_with_schedule(p, scop, schedule, options);
}
/* Wrapper around generate for use as a ppcg_transform callback.
*/
static __isl_give isl_printer *print_cpu_wrap(__isl_take isl_printer *p,
struct ppcg_scop *scop, void *user)
{
struct ppcg_options *options = user;
return generate(p, scop, options);
}
/* Transform the code in the file called "input" by replacing
* all scops by corresponding CPU code and write the results to a file
* called "output".
*/
int generate_cpu(isl_ctx *ctx, struct ppcg_options *options,
const char *input, const char *output)
{
FILE *output_file;
int r;
output_file = get_output_file(input, output);
if (!output_file)
return -1;
r = ppcg_transform(ctx, input, output_file, options,
&print_cpu_wrap, options);
fclose(output_file);
return r;
}
|
par_mgr.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Two-grid system solver
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "par_mgr.h"
#include <assert.h>
/* Create */
void *
hypre_MGRCreate()
{
hypre_ParMGRData *mgr_data;
mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST);
/* block data */
(mgr_data -> block_size) = 1;
(mgr_data -> num_coarse_indexes) = 1;
(mgr_data -> block_num_coarse_indexes) = NULL;
(mgr_data -> block_cf_marker) = NULL;
/* general data */
(mgr_data -> max_num_coarse_levels) = 10;
(mgr_data -> A_array) = NULL;
(mgr_data -> P_array) = NULL;
(mgr_data -> RT_array) = NULL;
(mgr_data -> RAP) = NULL;
(mgr_data -> CF_marker_array) = NULL;
(mgr_data -> coarse_indices_lvls) = NULL;
(mgr_data -> F_array) = NULL;
(mgr_data -> U_array) = NULL;
(mgr_data -> residual) = NULL;
(mgr_data -> rel_res_norms) = NULL;
(mgr_data -> Vtemp) = NULL;
(mgr_data -> Ztemp) = NULL;
(mgr_data -> Utemp) = NULL;
(mgr_data -> Ftemp) = NULL;
(mgr_data -> num_iterations) = 0;
(mgr_data -> num_interp_sweeps) = 1;
(mgr_data -> num_restrict_sweeps) = 1;
(mgr_data -> trunc_factor) = 0.0;
(mgr_data -> max_row_sum) = 0.9;
(mgr_data -> strong_threshold) = 0.25;
(mgr_data -> S_commpkg_switch) = 1.0;
(mgr_data -> P_max_elmts) = 0;
(mgr_data -> coarse_grid_solver) = NULL;
(mgr_data -> coarse_grid_solver_setup) = NULL;
(mgr_data -> coarse_grid_solver_solve) = NULL;
(mgr_data -> global_smoother) = NULL;
(mgr_data -> use_default_cgrid_solver) = 1;
(mgr_data -> omega) = 1.;
(mgr_data -> max_iter) = 20;
(mgr_data -> tol) = 1.0e-7;
(mgr_data -> relax_type) = 0;
(mgr_data -> relax_order) = 1;
(mgr_data -> interp_type) = 2;
(mgr_data -> restrict_type) = 0;
(mgr_data -> num_relax_sweeps) = 1;
(mgr_data -> relax_weight) = 1.0;
(mgr_data -> logging) = 0;
(mgr_data -> print_level) = 0;
(mgr_data -> l1_norms) = NULL;
(mgr_data -> reserved_coarse_size) = 0;
(mgr_data -> reserved_coarse_indexes) = NULL;
(mgr_data -> reserved_Cpoint_local_indexes) = NULL;
(mgr_data -> diaginv) = NULL;
(mgr_data -> global_smooth_iters) = 1;
(mgr_data -> global_smooth_type) = 0;
(mgr_data -> set_non_Cpoints_to_F) = 0;
(mgr_data -> Frelax_method) = 0;
(mgr_data -> FrelaxVcycleData) = NULL;
(mgr_data -> max_local_lvls) = 10;
(mgr_data -> print_coarse_system) = 0;
return (void *) mgr_data;
}
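/* Typical lifecycle (a sketch; hypre_MGRSetup and hypre_MGRSolve are assumed
* to be the usual driver entry points and are not shown in this excerpt):
*
* void *mgr = hypre_MGRCreate();
* hypre_MGRSetCpointsByBlock(mgr, block_size, max_levels, num_cpts, cpt_idx);
* hypre_MGRSetup(mgr, A, f, u);
* hypre_MGRSolve(mgr, A, f, u);
* hypre_MGRDestroy(mgr);
*/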
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Destroy */
HYPRE_Int
hypre_MGRDestroy( void *data )
{
hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data;
HYPRE_Int i;
HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels);
/* block info data */
if ((mgr_data -> block_cf_marker))
{
for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
{
if ((mgr_data -> block_cf_marker)[i])
{
hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
}
}
hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker) = NULL;
}
if(mgr_data -> block_num_coarse_indexes)
{
hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST);
(mgr_data -> block_num_coarse_indexes) = NULL;
}
/* final residual vector */
if((mgr_data -> residual))
{
hypre_ParVectorDestroy( (mgr_data -> residual) );
(mgr_data -> residual) = NULL;
}
if((mgr_data -> rel_res_norms))
{
hypre_TFree( (mgr_data -> rel_res_norms) , HYPRE_MEMORY_HOST);
(mgr_data -> rel_res_norms) = NULL;
}
/* temp vectors for solve phase */
if((mgr_data -> Vtemp))
{
hypre_ParVectorDestroy( (mgr_data -> Vtemp) );
(mgr_data -> Vtemp) = NULL;
}
if((mgr_data -> Ztemp))
{
hypre_ParVectorDestroy( (mgr_data -> Ztemp) );
(mgr_data -> Ztemp) = NULL;
}
if((mgr_data -> Utemp))
{
hypre_ParVectorDestroy( (mgr_data -> Utemp) );
(mgr_data -> Utemp) = NULL;
}
if((mgr_data -> Ftemp))
{
hypre_ParVectorDestroy( (mgr_data -> Ftemp) );
(mgr_data -> Ftemp) = NULL;
}
/* coarse grid solver */
if((mgr_data -> use_default_cgrid_solver))
{
if((mgr_data -> coarse_grid_solver))
hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) );
(mgr_data -> coarse_grid_solver) = NULL;
}
/* l1_norms */
if ((mgr_data -> l1_norms))
{
for (i=0; i < (num_coarse_levels); i++)
if ((mgr_data -> l1_norms)[i])
hypre_TFree((mgr_data -> l1_norms)[i], HYPRE_MEMORY_HOST);
hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST);
}
/* coarse_indices_lvls */
if ((mgr_data -> coarse_indices_lvls))
{
for (i=0; i < (num_coarse_levels); i++)
if ((mgr_data -> coarse_indices_lvls)[i])
hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST);
hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST);
}
/* linear system and cf marker array */
if(mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array)
{
for (i=1; i < num_coarse_levels+1; i++) {
hypre_ParVectorDestroy((mgr_data -> F_array)[i]);
hypre_ParVectorDestroy((mgr_data -> U_array)[i]);
if ((mgr_data -> P_array)[i-1])
hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i-1]);
if ((mgr_data -> RT_array)[i-1])
hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i-1]);
hypre_TFree((mgr_data -> CF_marker_array)[i-1], HYPRE_MEMORY_HOST);
}
for (i=1; i < (num_coarse_levels); i++) {
if ((mgr_data -> A_array)[i])
hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]);
}
}
if((mgr_data -> F_array))
{
hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST);
(mgr_data -> F_array) = NULL;
}
if((mgr_data -> U_array))
{
hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST);
(mgr_data -> U_array) = NULL;
}
if((mgr_data -> A_array))
{
hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST);
(mgr_data -> A_array) = NULL;
}
if((mgr_data -> P_array))
{
hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST);
(mgr_data -> P_array) = NULL;
}
if((mgr_data -> RT_array))
{
hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST);
(mgr_data -> RT_array) = NULL;
}
if((mgr_data -> CF_marker_array))
{
hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST);
(mgr_data -> CF_marker_array) = NULL;
}
if((mgr_data -> reserved_Cpoint_local_indexes))
{
hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST);
(mgr_data -> reserved_Cpoint_local_indexes) = NULL;
}
/* data for V-cycle F-relaxation */
if (mgr_data -> FrelaxVcycleData) {
for (i = 0; i < num_coarse_levels; i++) {
if ((mgr_data -> FrelaxVcycleData)[i]) {
hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]);
(mgr_data -> FrelaxVcycleData)[i] = NULL;
}
}
hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST);
mgr_data -> FrelaxVcycleData = NULL;
}
/* data for reserved coarse nodes */
if(mgr_data -> reserved_coarse_indexes)
{
hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST);
(mgr_data -> reserved_coarse_indexes) = NULL;
}
/* coarse level matrix - RAP */
if ((mgr_data -> RAP))
hypre_ParCSRMatrixDestroy((mgr_data -> RAP));
if ((mgr_data -> diaginv))
hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST);
/* mgr data */
hypre_TFree(mgr_data, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/* Create data for V-cycle F-relaxation */
void *
hypre_MGRCreateFrelaxVcycleData()
{
hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST);
hypre_ParAMGDataAArray(vdata) = NULL;
hypre_ParAMGDataPArray(vdata) = NULL;
hypre_ParAMGDataFArray(vdata) = NULL;
hypre_ParAMGDataCFMarkerArray(vdata) = NULL;
hypre_ParAMGDataVtemp(vdata) = NULL;
hypre_ParAMGDataAMat(vdata) = NULL;
hypre_ParAMGDataBVec(vdata) = NULL;
hypre_ParAMGDataZtemp(vdata) = NULL;
hypre_ParAMGDataCommInfo(vdata) = NULL;
hypre_ParAMGDataUArray(vdata) = NULL;
hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL;
hypre_ParAMGDataNumLevels(vdata) = 0;
hypre_ParAMGDataMaxLevels(vdata) = 10;
return (void *) vdata;
}
/* Destroy data for V-cycle F-relaxation */
HYPRE_Int
hypre_MGRDestroyFrelaxVcycleData( void *data )
{
hypre_ParAMGData * vdata = (hypre_ParAMGData*) data;
HYPRE_Int i;
HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata);
MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata);
for (i=1; i < num_levels; i++)
{
hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]);
hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]);
if (hypre_ParAMGDataAArray(vdata)[i])
hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]);
if (hypre_ParAMGDataPArray(vdata)[i-1])
hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i-1]);
hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[i-1], HYPRE_MEMORY_HOST);
}
/* see comments in par_coarsen.c regarding special case for CF_marker */
if (num_levels == 1)
{
hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[0], HYPRE_MEMORY_HOST);
}
/* Points to vtemp of mgr_data, which is already destroyed */
// hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata));
hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST);
/* Points to ztemp of mgr_data, which is already destroyed */
/*
if (hypre_ParAMGDataZtemp(vdata))
hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata));
*/
if (hypre_ParAMGDataAMat(vdata)) hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST);
if (hypre_ParAMGDataBVec(vdata)) hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST);
if (hypre_ParAMGDataCommInfo(vdata)) hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST);
if (new_comm != hypre_MPI_COMM_NULL)
{
hypre_MPI_Comm_free (&new_comm);
}
hypre_TFree(vdata, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/* Set C-point variables for each reduction level */
/* Currently not implemented */
HYPRE_Int
hypre_MGRSetReductionLevelCpoints( void *mgr_vdata,
HYPRE_Int nlevels,
HYPRE_Int *num_coarse_points,
HYPRE_Int **level_coarse_indexes)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_coarse_levels) = nlevels;
(mgr_data -> num_coarse_per_level) = num_coarse_points;
(mgr_data -> level_coarse_indexes) = level_coarse_indexes;
return hypre_error_flag;
}
/* Initialize some data */
/* Set whether non-coarse points on each level should be explicitly tagged as F-points */
HYPRE_Int
hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag;
return hypre_error_flag;
}
/* Initialize/set block data information */
HYPRE_Int
hypre_MGRSetCpointsByBlock( void *mgr_vdata,
HYPRE_Int block_size,
HYPRE_Int max_num_levels,
HYPRE_Int *block_num_coarse_points,
HYPRE_Int **block_coarse_indexes)
{
HYPRE_Int i,j;
HYPRE_Int **block_cf_marker = NULL;
HYPRE_Int *block_num_coarse_indexes = NULL;
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
/* free block cf_marker data if not previously destroyed */
if((mgr_data -> block_cf_marker) != NULL)
{
for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
{
if ((mgr_data -> block_cf_marker)[i])
{
hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker)[i] = NULL;
}
}
hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker) = NULL;
}
if((mgr_data -> block_num_coarse_indexes))
{
hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
(mgr_data -> block_num_coarse_indexes) = NULL;
}
/* store block cf_marker */
block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
for (i = 0; i < max_num_levels; i++)
{
block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int));
}
for (i = 0; i < max_num_levels; i++)
{
for(j=0; j<block_num_coarse_points[i]; j++)
{
(block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK;
}
}
/* store block_num_coarse_points */
if(max_num_levels > 0)
{
block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
for(i=0; i<max_num_levels; i++)
block_num_coarse_indexes[i] = block_num_coarse_points[i];
}
/* set block data */
(mgr_data -> max_num_coarse_levels) = max_num_levels;
(mgr_data -> block_size) = block_size;
(mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
(mgr_data -> block_cf_marker) = block_cf_marker;
return hypre_error_flag;
}
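/* Example: with block_size = 3 (say, three unknowns per grid point),
* block_num_coarse_points[0] = 1 and block_coarse_indexes[0] = {0}, the
* first unknown of every block is kept as a C-point on level 0 and the
* other two become F-points.
*/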
/* Set the number of points that remain part of the coarse grid throughout the hierarchy */
HYPRE_Int
hypre_MGRSetReservedCoarseNodes(void *mgr_vdata,
HYPRE_Int reserved_coarse_size,
HYPRE_Int *reserved_cpt_index)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_BigInt *reserved_coarse_indexes = NULL;
HYPRE_Int i;
if (!mgr_data)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! MGR object empty!\n");
return hypre_error_flag;
}
if(reserved_coarse_size < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
/* free data not previously destroyed */
if((mgr_data -> reserved_coarse_indexes))
{
hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST);
(mgr_data -> reserved_coarse_indexes) = NULL;
}
/* set reserved coarse nodes */
if(reserved_coarse_size > 0)
{
reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST);
for(i=0; i<reserved_coarse_size; i++)
reserved_coarse_indexes[i] = reserved_cpt_index[i];
}
(mgr_data -> reserved_coarse_size) = reserved_coarse_size;
(mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes;
return hypre_error_flag;
}
/* Set CF marker array */
HYPRE_Int
hypre_MGRCoarsen(hypre_ParCSRMatrix *S,
hypre_ParCSRMatrix *A,
HYPRE_Int fixed_coarse_size,
HYPRE_Int *fixed_coarse_indexes,
HYPRE_Int debug_flag,
HYPRE_Int **CF_marker,
HYPRE_Int cflag)
{
HYPRE_Int *cf_marker, i, row, nc;
HYPRE_Int *cindexes = fixed_coarse_indexes;
HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
/* If this is the last level, coarsen onto fixed coarse set */
if(cflag)
{
if(*CF_marker != NULL)
{
hypre_TFree(*CF_marker, HYPRE_MEMORY_HOST);
}
cf_marker = hypre_CTAlloc(HYPRE_Int, nloc, HYPRE_MEMORY_HOST);
memset(cf_marker, FMRK, nloc*sizeof(HYPRE_Int));
/* first mark fixed coarse set */
nc = fixed_coarse_size;
for(i = 0; i < nc; i++)
{
cf_marker[cindexes[i]] = CMRK;
}
}
else {
/* First coarsen to get initial CF splitting.
* This is then followed by updating the CF marker to pass
* coarse information to the next levels. NOTE: It may be
* convenient to implement this way (allows the use of multiple
* coarsening strategies without changing too much code),
* but not necessarily the best option, compared to initializing
* CF_marker first and then coarsening on subgraph which excludes
* the initialized coarse nodes.
*/
hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, &cf_marker);
/* Update CF_marker to correct Cpoints marked as Fpoints. */
nc = fixed_coarse_size;
for(i = 0; i < nc; i++)
{
cf_marker[cindexes[i]] = CMRK;
}
/* Set F-points to FMRK. This is necessary since the different coarsening schemes
* differentiate between types of F-points (e.g., Ruge coarsening). We do not need
* that distinction here.
*/
for (row = 0; row <nloc; row++)
{
if(cf_marker[row] == CMRK) continue;
cf_marker[row] = FMRK;
}
#if 0
/* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points
* in the next level.
*/
nc = 0;
index_i = 0;
for (row = 0; row <nloc; row++)
{
/* loop through new c-points */
if(cf_marker[row] == CMRK) nc++;
else if(cf_marker[row] == S_CMRK)
{
/* previously marked c-point is part of fixed coarse set. Track its current local index */
cindexes[index_i++] = nc;
/* reset c-point from S_CMRK to CMRK */
cf_marker[row] = CMRK;
nc++;
}
/* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
* between type of F-points (example Ruge coarsening). We do not need that distinction here.
*/
else
{
cf_marker[row] = FMRK;
}
}
/* check if this should be last level */
if( nc == fixed_coarse_size)
last_level = 1;
//printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size);
#endif
}
/* set CF_marker */
*CF_marker = cf_marker;
return hypre_error_flag;
}
/* Interpolation for MGR - Adapted from BoomerAMGBuildInterp.
* The 'method' argument selects the F-point interpolation weights:
* 0 sets them to zero, 1 uses -A_fc, and 2 uses -diag(A_ff)^{-1} A_fc. */
HYPRE_Int
hypre_MGRBuildP( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int method,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Real *a_diag;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
// HYPRE_Int jj_begin_row,jj_begin_row_offd;
// HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_BigInt *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
*--------------------------------------------------------------------*/
else
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED);
/*-----------------------------------------------------------------------
* Initialize counters.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
}
/* index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
big_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt;
}
comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
} */
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
//for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if ( i==i1 ) /* diagonal of A only */
{
a_diag[i] = 1.0/A_diag_data[jj];
}
}
}
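/* a_diag[i] now holds 1 / A(i,i); it is used below (method == 2) to scale
* the interpolation weights by the reciprocal diagonal of A. */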
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
if(method == 0)
{
P_diag_data[jj_counter] = 0.0;
}
else if (method == 1)
{
P_diag_data[jj_counter] = - A_diag_data[jj];
}
else if (method == 2)
{
P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i];
}
jj_counter++;
}
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
if(method == 0)
{
P_offd_data[jj_counter_offd] = 0.0;
}
else if (method == 1)
{
P_offd_data[jj_counter_offd] = - A_offd_data[jj];
}
else if (method == 2)
{
P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i];
}
jj_counter_offd++;
}
}
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
hypre_TFree(a_diag, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
//hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return(0);
}
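/* Illustrative sketch (not part of hypre): the thread-range partition used by
* the loops above. n_fine rows are split into num_threads contiguous chunks
* whose sizes differ by at most one; chunk j covers rows [ns, ne). */
static void sketch_thread_range(HYPRE_Int n_fine, HYPRE_Int num_threads,
HYPRE_Int j, HYPRE_Int *ns, HYPRE_Int *ne)
{
HYPRE_Int size = n_fine/num_threads;
HYPRE_Int rest = n_fine - size*num_threads; /* the first `rest` chunks get one extra row */
if (j < rest)
{
*ns = j*size + j;
*ne = (j+1)*size + j + 1;
}
else
{
*ns = j*size + rest;
*ne = (j+1)*size + rest;
}
}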
/* Interpolation for MGR - Dynamic Row Sum method */
HYPRE_Int
hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int blk_size,
HYPRE_Int reserved_coarse_size,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Real *a_diag;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_Int *tmp_map_offd = NULL; /* stays NULL when P has no off-diagonal part */
HYPRE_Int *CF_marker_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
// HYPRE_Int jj_begin_row,jj_begin_row_offd;
// HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
*--------------------------------------------------------------------*/
else
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
/*--------------------------------------------------------------------
* Set up the indexes for the DRS method
*--------------------------------------------------------------------*/
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Initialize counters for the second pass.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
//for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if ( i==i1 ) /* diagonal of A only */
{
a_diag[i] = 1.0/A_diag_data[jj];
}
}
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i];
jj_counter++;
}
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i];
jj_counter_offd++;
}
}
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
hypre_TFree(a_diag, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
// hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return(0);
}
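/* Illustrative sketch (not part of hypre): the interpolation weight used by
* the DRS build above. A_ff^{-1} is approximated by its inverse diagonal, so
* an F-point row i with diagonal a_ii gives each C-neighbor j the weight
* w_ij = -a_ij / a_ii (a_diag[i] in the code caches 1.0/a_ii). */
static HYPRE_Real sketch_drs_weight(HYPRE_Real a_ij, HYPRE_Real a_ii)
{
return -a_ij/a_ii;
}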
/* Setup interpolation operator */
HYPRE_Int
hypre_MGRBuildInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P,
HYPRE_Int last_level,
HYPRE_Int method,
HYPRE_Int numsweeps)
{
// HYPRE_Int i;
hypre_ParCSRMatrix *P_ptr = NULL;
// HYPRE_Real jac_trunc_threshold = trunc_factor;
// HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;
/* Build interpolation operator using the hypre default */
if(!last_level)
{
hypre_MGRBuildP( A,CF_marker,num_cpts_global,2,debug_flag,&P_ptr);
}
/* Do Jacobi interpolation for last level */
else
{
if (method <3)
{
hypre_MGRBuildP( A,CF_marker,num_cpts_global,method,debug_flag,&P_ptr);
/* Could do a few sweeps of Jacobi to further improve P */
//for(i=0; i<numsweeps; i++)
// hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus );
}
else
{
/* Classical modified interpolation */
hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global,1, NULL,debug_flag,
trunc_factor, max_elmts, col_offd_S_to_A, &P_ptr);
/* Do k steps of Jacobi to build W for P = [-W I].
* Note that BoomerAMGJacobiInterp assumes you have some initial P,
* hence we need to initialize P as above, before calling this routine.
* If numsweeps = 0, the following step is skipped and P is returned as is.
* Looping here is equivalent to improving P by Jacobi interpolation
*/
// for(i=0; i<numsweeps; i++)
// hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker,
// 0, jac_trunc_threshold,
// jac_trunc_threshold_minus );
}
}
/* set pointer to P */
*P = P_ptr;
return hypre_error_flag;
}
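/* Usage sketch (hypothetical argument values, not from hypre): build the
* last-level MGR prolongation with classical modified interpolation
* (method >= 3), scalar system, no truncation, no extra Jacobi sweeps. */
static void sketch_mgr_interp_usage(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global)
{
hypre_ParCSRMatrix *P = NULL;
hypre_MGRBuildInterp(A, CF_marker, S, num_cpts_global,
1, /* num_functions */
NULL, /* dof_func */
0, /* debug_flag */
0.0, /* trunc_factor */
0, /* max_elmts */
NULL, /* col_offd_S_to_A */
&P,
1, /* last_level */
3, /* method */
0); /* numsweeps */
}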
void hypre_blas_smat_inv_n4 (HYPRE_Real *a)
{
const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3];
const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7];
const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11];
const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15];
const HYPRE_Real M11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42;
const HYPRE_Real M12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43;
const HYPRE_Real M13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42;
const HYPRE_Real M14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33;
const HYPRE_Real M21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43;
const HYPRE_Real M22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41;
const HYPRE_Real M23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43;
const HYPRE_Real M24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31;
const HYPRE_Real M31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41;
const HYPRE_Real M32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42;
const HYPRE_Real M33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41;
const HYPRE_Real M34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32;
const HYPRE_Real M41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42;
const HYPRE_Real M42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41;
const HYPRE_Real M43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42;
const HYPRE_Real M44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31;
const HYPRE_Real det = a11*M11 + a12*M21 + a13*M31 + a14*M41;
HYPRE_Real det_inv;
//if ( fabs(det) < 1e-22 ) {
/* there should be no print statements that can't be turned off. Is this an error? */
//hypre_fprintf(stderr, "### WARNING: Matrix is nearly singular! det = %e\n", det);
/*
printf("##----------------------------------------------\n");
printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2);
printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5);
printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7);
printf("##----------------------------------------------\n");
getchar();
*/
//}
det_inv = 1.0/det;
a[0] = M11*det_inv; a[1] = M12*det_inv; a[2] = M13*det_inv; a[3] = M14*det_inv;
a[4] = M21*det_inv; a[5] = M22*det_inv; a[6] = M23*det_inv; a[7] = M24*det_inv;
a[8] = M31*det_inv; a[9] = M32*det_inv; a[10] = M33*det_inv; a[11] = M34*det_inv;
a[12] = M41*det_inv; a[13] = M42*det_inv; a[14] = M43*det_inv; a[15] = M44*det_inv;
}
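/* Usage sketch (not part of hypre): hypre_blas_smat_inv_n4 inverts a
* row-major 4x4 matrix in place via cofactors; a diagonal example is easy to
* check by hand. */
static void sketch_smat_inv_n4_demo(void)
{
HYPRE_Real a[16] = { 2.0, 0.0, 0.0, 0.0,
0.0, 4.0, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0,
0.0, 0.0, 0.0, 10.0 };
hypre_blas_smat_inv_n4(a);
/* a now holds diag(0.5, 0.25, 0.2, 0.1) */
}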
void hypre_blas_mat_inv(HYPRE_Real *a,
HYPRE_Int n)
{
HYPRE_Int i,j,k,l,u,kn,in;
HYPRE_Real alinv;
if (n == 4)
{
hypre_blas_smat_inv_n4(a);
}
else
{
for (k=0; k<n; ++k) {
kn = k*n;
l = kn+k;
//if (fabs(a[l]) < SMALLREAL) {
// printf("### WARNING: Diagonal entry is close to zero!");
// printf("### WARNING: diag_%d=%e\n", k, a[l]);
// a[l] = SMALLREAL;
//}
alinv = 1.0/a[l];
a[l] = alinv;
for (j=0; j<k; ++j) {
u = kn+j; a[u] *= alinv;
}
for (j=k+1; j<n; ++j) {
u = kn+j; a[u] *= alinv;
}
for (i=0; i<k; ++i) {
in = i*n;
for (j=0; j<n; ++j)
if (j!=k) {
u = in+j; a[u] -= a[in+k]*a[kn+j];
} // end if (j!=k)
}
for (i=k+1; i<n; ++i) {
in = i*n;
for (j=0; j<n; ++j)
if (j!=k) {
u = in+j; a[u] -= a[in+k]*a[kn+j];
} // end if (j!=k)
}
for (i=0; i<k; ++i) {
u=i*n+k; a[u] *= -alinv;
}
for (i=k+1; i<n; ++i) {
u=i*n+k; a[u] *= -alinv;
}
} // end for (k=0; k<n; ++k)
}// end if
}
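/* Usage sketch (not part of hypre): hypre_blas_mat_inv does an in-place
* Gauss-Jordan inversion without pivoting (n == 4 dispatches to the cofactor
* routine above), so it assumes the pivots stay nonzero. */
static void sketch_mat_inv_demo(void)
{
HYPRE_Real a[4] = { 4.0, 0.0,
0.0, 2.0 }; /* row-major 2x2 */
hypre_blas_mat_inv(a, 2);
/* a now holds diag(0.25, 0.5) */
}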
HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr,
void *mgr_vdata, HYPRE_Int debug_flag)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int num_procs, my_id;
HYPRE_Int blk_size = (mgr_data -> block_size);
HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_ParCSRMatrix *B;
hypre_CSRMatrix *B_diag;
HYPRE_Real *B_diag_data;
HYPRE_Int *B_diag_i;
HYPRE_Int *B_diag_j;
hypre_CSRMatrix *B_offd;
HYPRE_Int i,ii;
HYPRE_Int j,jj;
HYPRE_Int k;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int n_block, left_size,inv_size;
// HYPRE_Real wall_time; /* for debugging instrumentation */
HYPRE_Int bidx,bidxm1,bidxp1;
HYPRE_Real * diaginv;
const HYPRE_Int nb2 = blk_size*blk_size;
HYPRE_Int block_scaling_error = 0;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// HYPRE_Int num_threads = hypre_NumThreads();
//printf("n = %d\n",n);
if (my_id == (num_procs - 1)) /* reserved coarse rows are kept on the last rank */
{
n_block = (n - reserved_coarse_size) / blk_size;
left_size = n - blk_size*n_block;
}
else
{
n_block = n / blk_size;
left_size = n - blk_size*n_block;
}
inv_size = nb2*n_block + left_size*left_size;
//printf("inv_size = %d\n",inv_size);
hypre_blockRelax_setup(A,blk_size,reserved_coarse_size,&(mgr_data -> diaginv));
// if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*-----------------------------------------------------------------------
* First Pass: Determine size of B and fill in
*-----------------------------------------------------------------------*/
B_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST);
B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
B_diag_i[n] = inv_size;
//B_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
//B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
//B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
//B_offd_i[n] = 1;
/*-----------------------------------------------------------------
* Get all the diagonal sub-blocks
*-----------------------------------------------------------------*/
diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST);
//printf("n_block = %d\n",n_block);
for (i = 0;i < n_block; i++)
{
bidxm1 = i*blk_size;
bidxp1 = (i+1)*blk_size;
for (k = 0;k < blk_size; k++)
{
for (j = 0;j < blk_size; j++)
{
bidx = k*blk_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
{
bidx = k*blk_size + jj - bidxm1;
//printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
diaginv[bidx] = A_diag_data[ii];
}
}
}
/* for (k = 0;k < blk_size; k++) */
/* { */
/* for (j = 0;j < blk_size; j++) */
/* { */
/* bidx = k*blk_size + j; */
/* printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */
/* } */
/* } */
hypre_blas_mat_inv(diaginv, blk_size);
for (k = 0;k < blk_size; k++)
{
B_diag_i[i*blk_size+k] = i*nb2 + k*blk_size;
//B_offd_i[i*nb2+k] = 0;
for (j = 0;j < blk_size; j++)
{
bidx = i*nb2 + k*blk_size + j;
B_diag_j[bidx] = i*blk_size + j;
B_diag_data[bidx] = diaginv[k*blk_size + j];
}
}
}
//printf("Before create\n");
B = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixRowStarts(A),
hypre_ParCSRMatrixColStarts(A),
0,
inv_size,
0);
//printf("After create\n");
B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrixData(B_diag) = B_diag_data;
hypre_CSRMatrixI(B_diag) = B_diag_i;
hypre_CSRMatrixJ(B_diag) = B_diag_j;
B_offd = hypre_ParCSRMatrixOffd(B);
hypre_CSRMatrixData(B_offd) = NULL;
hypre_CSRMatrixI(B_offd) = NULL;
hypre_CSRMatrixJ(B_offd) = NULL;
/* hypre_ParCSRMatrixOwnsRowStarts(B) = 0; */
*B_ptr = B;
return(block_scaling_error);
}
HYPRE_Int hypre_block_jacobi (hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u,
HYPRE_Int blk_size,
HYPRE_Int n_block,
HYPRE_Int left_size,
HYPRE_Real *diaginv,
hypre_ParVector *Vtemp)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
HYPRE_Real *Vext_data = NULL;
HYPRE_Real *v_buf_data;
HYPRE_Int i, j, k;
HYPRE_Int ii, jj;
HYPRE_Int bidx,bidx1;
HYPRE_Int relax_error = 0;
HYPRE_Int num_sends;
HYPRE_Int index, start;
HYPRE_Int num_procs, my_id;
HYPRE_Real *res;
const HYPRE_Int nb2 = blk_size*blk_size;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// HYPRE_Int num_threads = hypre_NumThreads();
res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
//printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
}
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax points block by block
*-----------------------------------------------------------------*/
for (i = 0;i < n_block; i++)
{
for (j = 0;j < blk_size; j++)
{
bidx = i*blk_size +j;
res[j] = f_data[bidx];
for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++)
{
ii = A_diag_j[jj];
res[j] -= A_diag_data[jj] * Vtemp_data[ii];
//printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
}
for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++)
{
ii = A_offd_j[jj];
res[j] -= A_offd_data[jj] * Vext_data[ii];
}
//printf("%d: res = %e\n",bidx,res[j]);
}
for (j = 0;j < blk_size; j++)
{
bidx1 = i*blk_size +j;
for (k = 0;k < blk_size; k++)
{
bidx = i*nb2 +j*blk_size+k;
u_data[bidx1] += res[k]*diaginv[bidx];
//printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
}
//printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(res, HYPRE_MEMORY_HOST);
return(relax_error);
}
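/* Illustrative sketch (not part of hypre): the same block-Jacobi update on a
* dense n x n system, assuming blk_size divides n and diaginv already holds
* the inverted diagonal blocks in the layout built by the setup routine
* below: u <- u + D_blk^{-1} (f - A u). */
static void sketch_dense_block_jacobi(HYPRE_Int n, HYPRE_Int blk_size,
HYPRE_Real *A, HYPRE_Real *diaginv, HYPRE_Real *f, HYPRE_Real *u)
{
HYPRE_Int nb2 = blk_size*blk_size;
HYPRE_Int i, j, k;
HYPRE_Real *r = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
for (i = 0; i < n; i++) /* residual r = f - A*u */
{
r[i] = f[i];
for (j = 0; j < n; j++) r[i] -= A[i*n + j]*u[j];
}
for (i = 0; i < n/blk_size; i++) /* block-wise correction */
for (j = 0; j < blk_size; j++)
for (k = 0; k < blk_size; k++)
u[i*blk_size + j] += diaginv[i*nb2 + j*blk_size + k]*r[i*blk_size + k];
hypre_TFree(r, HYPRE_MEMORY_HOST);
}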
/*Block smoother*/
HYPRE_Int
hypre_blockRelax_setup(hypre_ParCSRMatrix *A,
HYPRE_Int blk_size,
HYPRE_Int reserved_coarse_size,
HYPRE_Real **diaginvptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int i, j,k;
HYPRE_Int ii, jj;
HYPRE_Int bidx,bidxm1,bidxp1;
HYPRE_Int num_procs, my_id;
const HYPRE_Int nb2 = blk_size*blk_size;
HYPRE_Int n_block;
HYPRE_Int left_size,inv_size;
HYPRE_Real *diaginv = *diaginvptr;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// HYPRE_Int num_threads = hypre_NumThreads();
if (my_id == (num_procs - 1)) /* reserved coarse rows are kept on the last rank */
{
n_block = (n - reserved_coarse_size) / blk_size;
left_size = n - blk_size*n_block;
}
else
{
n_block = n / blk_size;
left_size = n - blk_size*n_block;
}
inv_size = nb2*n_block + left_size*left_size;
if (diaginv != NULL)
{
hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
}
diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------
* Get all the diagonal sub-blocks
*-----------------------------------------------------------------*/
for (i = 0;i < n_block; i++)
{
bidxm1 = i*blk_size;
bidxp1 = (i+1)*blk_size;
//printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);
for (k = 0;k < blk_size; k++)
{
for (j = 0;j < blk_size; j++)
{
bidx = i*nb2 + k*blk_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
{
bidx = i*nb2 + k*blk_size + jj - bidxm1;
//printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
diaginv[bidx] = A_diag_data[ii];
}
}
}
}
/* Gather the trailing left_size x left_size block (reserved rows), stored
densely right after the n_block regular blocks */
for (i = 0;i < left_size; i++)
{
for (j = 0;j < left_size; j++)
{
bidx = n_block*nb2 + i*left_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= n_block*blk_size) /* >= keeps the diagonal entry itself */
{
bidx = n_block*nb2 + i*left_size + jj - n_block*blk_size;
diaginv[bidx] = A_diag_data[ii];
}
}
}
/*-----------------------------------------------------------------
* compute the inverses of all the diagonal sub-blocks
*-----------------------------------------------------------------*/
if (blk_size > 1)
{
for (i = 0;i < n_block; i++)
{
hypre_blas_mat_inv(diaginv+i*nb2, blk_size);
}
hypre_blas_mat_inv(diaginv + n_block*nb2, left_size); /* invert the trailing reserved block */
}
else
{
for (i = 0;i < n; i++)
{
// FIX-ME: zero-diagonal should be tested previously
if (fabs(diaginv[i]) < SMALLREAL)
diaginv[i] = 0.0;
else
diaginv[i] = 1.0 / diaginv[i];
}
}
*diaginvptr = diaginv;
return 1;
}
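/* Usage sketch (hypothetical values, not from hypre): precompute the inverted
* diagonal blocks once and reuse them across block-Jacobi sweeps. */
static void sketch_blockRelax_setup_usage(hypre_ParCSRMatrix *A)
{
HYPRE_Real *diaginv = NULL; /* allocated and filled by the setup */
hypre_blockRelax_setup(A, 4, 0, &diaginv); /* 4x4 blocks, no reserved rows */
/* ... pass diaginv to hypre_block_jacobi ... */
hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
}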
HYPRE_Int
hypre_blockRelax(hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u,
HYPRE_Int blk_size,
HYPRE_Int reserved_coarse_size,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int i, j,k;
HYPRE_Int ii, jj;
HYPRE_Int bidx,bidxm1,bidxp1;
HYPRE_Int relax_error = 0;
HYPRE_Int num_procs, my_id;
const HYPRE_Int nb2 = blk_size*blk_size;
HYPRE_Int n_block;
HYPRE_Int left_size,inv_size;
HYPRE_Real *diaginv;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// HYPRE_Int num_threads = hypre_NumThreads();
if (my_id == (num_procs - 1)) /* reserved coarse rows are kept on the last rank */
{
n_block = (n - reserved_coarse_size) / blk_size;
left_size = n - blk_size*n_block;
}
else
{
n_block = n / blk_size;
left_size = n - blk_size*n_block;
}
inv_size = nb2*n_block + left_size*left_size;
diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------
* Get all the diagonal sub-blocks
*-----------------------------------------------------------------*/
for (i = 0;i < n_block; i++)
{
bidxm1 = i*blk_size;
bidxp1 = (i+1)*blk_size;
//printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);
for (k = 0;k < blk_size; k++)
{
for (j = 0;j < blk_size; j++)
{
bidx = i*nb2 + k*blk_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
{
bidx = i*nb2 + k*blk_size + jj - bidxm1;
//printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
diaginv[bidx] = A_diag_data[ii];
}
}
}
}
/* Gather the trailing left_size x left_size block (reserved rows), stored
densely right after the n_block regular blocks */
for (i = 0;i < left_size; i++)
{
for (j = 0;j < left_size; j++)
{
bidx = n_block*nb2 + i*left_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= n_block*blk_size) /* >= keeps the diagonal entry itself */
{
bidx = n_block*nb2 + i*left_size + jj - n_block*blk_size;
diaginv[bidx] = A_diag_data[ii];
}
}
}
/*
for (i = 0;i < n_block; i++)
{
for (j = 0;j < blk_size; j++)
{
for (k = 0;k < blk_size; k ++)
{
bidx = i*nb2 + j*blk_size + k;
printf("%e\t",diaginv[bidx]);
}
printf("\n");
}
printf("\n");
}
*/
/*-----------------------------------------------------------------
* compute the inverses of all the diagonal sub-blocks
*-----------------------------------------------------------------*/
if (blk_size > 1)
{
for (i = 0;i < n_block; i++)
{
hypre_blas_mat_inv(diaginv+i*nb2, blk_size);
}
hypre_blas_mat_inv(diaginv + n_block*nb2, left_size); /* invert the trailing reserved block */
/*
for (i = 0;i < n_block; i++)
{
for (j = 0;j < blk_size; j++)
{
for (k = 0;k < blk_size; k ++)
{
bidx = i*nb2 + j*blk_size + k;
printf("%e\t",diaginv[bidx]);
}
printf("\n");
}
printf("\n");
}
*/
}
else
{
for (i = 0;i < n; i++)
{
// FIX-ME: zero-diagonal should be tested previously
if (fabs(diaginv[i]) < SMALLREAL)
diaginv[i] = 0.0;
else
diaginv[i] = 1.0 / diaginv[i];
}
}
hypre_block_jacobi(A,f,u,blk_size,n_block,left_size,diaginv,Vtemp);
/*-----------------------------------------------------------------
* Free temporary memory
*-----------------------------------------------------------------*/
hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
return(relax_error);
}
/* set coarse grid solver */
HYPRE_Int
hypre_MGRSetCoarseSolver( void *mgr_vdata,
HYPRE_Int (*coarse_grid_solver_solve)(void*,void*,void*,void*),
HYPRE_Int (*coarse_grid_solver_setup)(void*,void*,void*,void*),
void *coarse_grid_solver )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
(mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve;
(mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup;
(mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver;
(mgr_data -> use_default_cgrid_solver) = 0;
return hypre_error_flag;
}
/* Set the maximum number of coarse levels.
* maxcoarselevs = 1 yields the default 2-grid scheme.
*/
HYPRE_Int
hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> max_num_coarse_levels) = maxcoarselevs;
return hypre_error_flag;
}
/* Set the system block size */
HYPRE_Int
hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> block_size) = bsize;
return hypre_error_flag;
}
/* Set the relaxation type for the fine levels of the reduction.
* Currently supports the following flavors of relaxation types
* as described in the documentation:
* relax_types 0 - 8, 13, 14, 18, 19, 98.
* See par_relax.c and par_relax_more.c for more details.
*
*/
HYPRE_Int
hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> relax_type) = relax_type;
return hypre_error_flag;
}
/* Set the number of relaxation sweeps */
HYPRE_Int
hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_relax_sweeps) = nsweeps;
return hypre_error_flag;
}
/* Set the F-relaxation strategy: 0=single level, 1=multi level
*/
HYPRE_Int
hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> Frelax_method) = relax_method;
return hypre_error_flag;
}
/* Set the restriction type
* for computing the restriction operator
*/
HYPRE_Int
hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> restrict_type) = restrict_type;
return hypre_error_flag;
}
/* Set the number of restriction sweeps
* for computing the restriction operator
*/
HYPRE_Int
hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_restrict_sweeps) = nsweeps;
return hypre_error_flag;
}
/* Set the type of the interpolation
* for computing interpolation operator
*/
HYPRE_Int
hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> interp_type) = interpType;
return hypre_error_flag;
}
/* Set the number of Jacobi interpolation iterations
* for computing interpolation operator
*/
HYPRE_Int
hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_interp_sweeps) = nsweeps;
return hypre_error_flag;
}
/* Set print level for mgr solver */
HYPRE_Int
hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> print_level) = print_level;
return hypre_error_flag;
}
/* Set logging level for mgr solver */
HYPRE_Int
hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> logging) = logging;
return hypre_error_flag;
}
/* Set max number of iterations for mgr solver */
HYPRE_Int
hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> max_iter) = max_iter;
return hypre_error_flag;
}
/* Set convergence tolerance for mgr solver */
HYPRE_Int
hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> tol) = tol;
return hypre_error_flag;
}
/* Set max number of global smoothing iterations for mgr solver */
HYPRE_Int
hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> global_smooth_iters) = max_iter;
return hypre_error_flag;
}
/* Set the global smoother type for mgr solver */
HYPRE_Int
hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> global_smooth_type) = iter_type;
return hypre_error_flag;
}
/* Get number of iterations for MGR solver */
HYPRE_Int
hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*num_iterations = mgr_data->num_iterations;
return hypre_error_flag;
}
/* Get residual norms for MGR solver */
HYPRE_Int
hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*res_norm = mgr_data->final_rel_residual_norm;
return hypre_error_flag;
}
HYPRE_Int
hypre_MGRBuildAff( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions,
HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr,
hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr )
{
HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST);
HYPRE_Int i;
for (i = 0; i < local_num_variables; i++) {
CF_marker_copy[i] = -CF_marker[i];
}
hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr);
hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr);
hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr);
hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
return 0;
}
/* Get pointer to coarse grid matrix for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (mgr_data -> RAP == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n");
return hypre_error_flag;
}
*RAP = mgr_data->RAP;
return hypre_error_flag;
}
/* Get pointer to coarse grid solution for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (mgr_data -> U_array == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
return hypre_error_flag;
}
*sol = mgr_data->U_array[mgr_data->num_coarse_levels];
return hypre_error_flag;
}
/* Get pointer to coarse grid right-hand side for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (mgr_data -> F_array == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR RHS array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
return hypre_error_flag;
}
*rhs = mgr_data->F_array[mgr_data->num_coarse_levels];
return hypre_error_flag;
}
/* Print coarse grid linear system (for debugging)*/
HYPRE_Int
hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
mgr_data->print_coarse_system = print_flag;
return hypre_error_flag;
}
/* Print solver params */
HYPRE_Int
hypre_MGRWriteSolverParams(void *mgr_vdata)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
hypre_printf("MGR Setup parameters: \n");
hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels));
hypre_printf("Block size: %d\n", (mgr_data -> block_size));
hypre_printf("Number of coarse indexes: %d\n", (mgr_data -> num_coarse_indexes));
hypre_printf("reserved coarse nodes size: %d\n", (mgr_data -> reserved_coarse_size));
hypre_printf("\n MGR Solver Parameters: \n");
hypre_printf("F-relaxation Method: %d\n", (mgr_data -> Frelax_method));
hypre_printf("Relax type: %d\n", (mgr_data -> relax_type));
hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps));
hypre_printf("Interpolation type: %d\n", (mgr_data -> interp_type));
hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps));
hypre_printf("Restriction type: %d\n", (mgr_data -> restrict_type));
hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps));
hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type));
hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters));
hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter));
hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol));
return hypre_error_flag;
}
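/* Usage sketch (hypothetical values, not from hypre): typical MGR option
* setters, called after creating the solver and before setup. */
static void sketch_mgr_options(void *mgr_vdata)
{
hypre_MGRSetMaxCoarseLevels(mgr_vdata, 1); /* default 2-grid scheme */
hypre_MGRSetBlockSize(mgr_vdata, 2);
hypre_MGRSetRelaxType(mgr_vdata, 0);
hypre_MGRSetNumRelaxSweeps(mgr_vdata, 1);
hypre_MGRSetMaxIter(mgr_vdata, 20);
hypre_MGRSetTol(mgr_vdata, 1e-7);
hypre_MGRSetPrintLevel(mgr_vdata, 1);
}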
|
compute_simulation_beamlet.c | /*
This file is part of the MCsquare software
Copyright © 2016-2017 Université catholique de Louvain (UCL)
All rights reserved.
The MCsquare software has been developed by Kevin Souris from UCL in the context of a collaboration with IBA s.a.
Each use of this software must be attributed to Université catholique de Louvain (UCL, Louvain-la-Neuve). Any other additional authorizations may be asked to LTTO@uclouvain.be.
The MCsquare software is released under the terms of the open-source Apache 2.0 license. Anyone can use or modify the code provided that the Apache 2.0 license conditions are met. See the Apache 2.0 license for more details https://www.apache.org/licenses/LICENSE-2.0
The MCsquare software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "include/compute_simulation_beamlet.h"
void Run_simulation_beamlet(DATA_config *config, Materials *material, DATA_CT **CT_phases, plan_parameters *plan, machine_parameters *machine, DATA_4D_Fields *Fields){
int spotID;
int tot_phases;
if(config->Simu_4D_Mode == 0) tot_phases = 1;
else tot_phases = config->Num_4DCT_phases;
// Parallelization
#pragma omp parallel for shared(config, material, CT_phases, plan, machine, Fields, tot_phases) schedule(dynamic,1)
// #pragma omp parallel for shared(config, material, CT_phases, plan, machine, Fields, tot_phases) ordered schedule(dynamic,1)
for(spotID=0; spotID<config->TotalNbrSpots; spotID++){
double time_init, time_MC, time_end;
char file_path[200], output_beamlet_suffix[200], output_4D_suffix[200];
int tid = omp_get_thread_num();
VAR_SCORING *energy_accumulation;
VAR_SCORING *dose_accumulation;
VAR_SCORING *PG_accumulation;
VAR_SCORING *PG_Spectrum_accumulation;
VAR_SCORING *LET_accumulation;
VAR_COMPUTE *deformed;
VAR_COMPUTE norm_factor = 1;
DATA_CT *ct = NULL;
int a,b,c,d;
// Create a new plan containing only the current spot
plan_parameters *Beamlet = Init_single_spot_plan(plan);
int current_spot = 0;
int spot_selected = 0;
for(b=0; b < plan->NumberOfFields; b++){
for(c=0; c < plan->fields[b].NumberOfControlPoints; c++){
for(d=0; d < plan->fields[b].ControlPoints[c].NbOfScannedSpots; d++){
if(current_spot == spotID){
Select_spot(plan, Beamlet, b, c, d);
spot_selected = 1;
break;
}
current_spot++;
}
if(spot_selected == 1) break;
}
if(spot_selected == 1) break;
}
for(a=0; a <tot_phases; a++){
time_init = omp_get_wtime();
if(config->Simu_4D_Mode == 0) ct = CT_phases[0];
else ct = CT_phases[a];
unsigned long Nbr_simulated_primaries = 0;
// Init RNG
VSLStreamStatePtr RNDstream; // one RNG stream per thread
ALIGNED_(64) VAR_COMPUTE v_rnd[VLENGTH]; // vector of random numbers
if(config->RNG_Seed == 0){
vslNewStream(&RNDstream, VSL_BRNG_MCG59, time(NULL)+tid); // initialize the RNG stream with seed (time + thread_id)
}
else{
vslNewStream(&RNDstream, VSL_BRNG_MCG59, config->RNG_Seed+tid);
}
rand_uniform(RNDstream, v_rnd); // draw a first batch and discard it: the first numbers appear poorly distributed
// Init scoring
DATA_Scoring Tot_scoring = Init_Scoring(config, ct->Nbr_voxels, 1);
// Init particle stacks
Hadron hadron;
Init_particles(&hadron);
Hadron_buffer HadronToSimulate[100];
int Nbr_HadronToSimulate = 0;
// variables
int i, count, stop = 0;
// Compute simulation
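// The Hadron struct packs VLENGTH particles, one per SIMD lane. Each pass
// refills empty lanes (type Unknown) from the HadronToSimulate buffer, or
// generates new PBS primaries until Num_Primaries is reached; the loop ends
// once every lane is empty.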
while(stop == 0){
for(i=0; i<VLENGTH; i++){
if(hadron.v_type[i] == Unknown){
if(Nbr_HadronToSimulate > 0){
Nbr_HadronToSimulate -= 1;
Insert_particle(&hadron, i, &HadronToSimulate[Nbr_HadronToSimulate]);
}
else if(Nbr_simulated_primaries < config->Num_Primaries){
Nbr_simulated_primaries += VLENGTH;
//Generate_particle(&hadron, i, BeamPOSx, BeamPOSy, BeamPOSz, PEnergy*UMeV);
Generate_PBS_particle(HadronToSimulate, &Nbr_HadronToSimulate, ct->Length, Beamlet, machine, RNDstream, config, material);
if(Nbr_HadronToSimulate > 0){
Nbr_HadronToSimulate -= 1;
Insert_particle(&hadron, i, &HadronToSimulate[Nbr_HadronToSimulate]);
}
}
else{
count = __sec_reduce_add(hadron.v_type[vALL]);
if(count == 0) stop = 1;
}
}
}
hadron_step(&hadron, &Tot_scoring, material, ct, HadronToSimulate, &Nbr_HadronToSimulate, RNDstream, config);
}
time_MC = omp_get_wtime();
// Scoring post-processing (convert energy to dose per proton, etc.)
PostProcess_Scoring(&Tot_scoring, ct, material, plan->normalization_factor, Nbr_simulated_primaries, config);
// 4D Accumulation:
int export_results = 0;
int ii;
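// Results are exported directly when neither 4D nor fraction accumulation is
// requested. Otherwise each phase is deformed to the reference phase and
// accumulated with weights 1/Num_4DCT_phases and 1/NumberOfFractions, and a
// single export happens after the last phase of the last fraction.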
if((config->Simu_4D_Mode == 0 || config->Dose_4D_Accumulation == 0) && config->Fraction_accumulation == 0) export_results = 1;
else{
strcpy(config->output_4D_suffix, "");
if(config->Simu_4D_Mode == 1 && config->Dose_4D_Accumulation == 1) norm_factor /= config->Num_4DCT_phases;
if(config->Fraction_accumulation == 1) norm_factor /= plan->NumberOfFractions;
if(config->Energy_ASCII_Output == 1 || config->Energy_MHD_Output == 1 || config->Energy_Sparse_Output == 1){
if(a == 0 && (config->Current_fraction == 1 || config->Fraction_accumulation == 0)) energy_accumulation = (VAR_SCORING*)calloc(ct->Nbr_voxels, sizeof(VAR_SCORING));
if(config->Simu_4D_Mode == 0){
for(ii=0; ii<ct->Nbr_voxels; ii++) energy_accumulation[ii] += Tot_scoring.energy[ii] * norm_factor;
}
else{
deformed = Image_deformation(Tot_scoring.energy, ct->GridSize, ct->VoxelLength, ct->Origin, Fields->Phase2Ref[a], Fields->GridSize, Fields->Spacing, Fields->Origin);
for(ii=0; ii<ct->Nbr_voxels; ii++) energy_accumulation[ii] += deformed[ii] * norm_factor;
free(deformed);
}
if(a == (config->Num_4DCT_phases-1) && config->Current_fraction == plan->NumberOfFractions){
export_results = 1;
if(Tot_scoring.energy != NULL) free(Tot_scoring.energy);
Tot_scoring.energy = energy_accumulation;
}
}
if(config->Compute_DVH == 1 || config->Dose_ASCII_Output == 1 || config->Dose_MHD_Output == 1 || config->Dose_Sparse_Output == 1){
if(a == 0 && (config->Current_fraction == 1 || config->Fraction_accumulation == 0)) dose_accumulation = (VAR_SCORING*)calloc(ct->Nbr_voxels, sizeof(VAR_SCORING));
if(config->Simu_4D_Mode == 0){
for(ii=0; ii<ct->Nbr_voxels; ii++) dose_accumulation[ii] += Tot_scoring.dose[ii] * norm_factor;
}
else{
deformed = Image_deformation(Tot_scoring.dose, ct->GridSize, ct->VoxelLength, ct->Origin, Fields->Phase2Ref[a], Fields->GridSize, Fields->Spacing, Fields->Origin);
for(ii=0; ii<ct->Nbr_voxels; ii++) dose_accumulation[ii] += deformed[ii] * norm_factor;
free(deformed);
}
if(a == (config->Num_4DCT_phases-1) && config->Current_fraction == plan->NumberOfFractions){
export_results = 1;
if(Tot_scoring.dose != NULL) free(Tot_scoring.dose);
Tot_scoring.dose = dose_accumulation;
}
}
if(config->Score_PromptGammas == 1){
if(a == 0 && (config->Current_fraction == 1 || config->Fraction_accumulation == 0)){
PG_accumulation = (VAR_SCORING*)calloc(ct->Nbr_voxels, sizeof(VAR_SCORING));
PG_Spectrum_accumulation = (VAR_SCORING*)calloc(config->PG_Spectrum_NumBin, sizeof(VAR_SCORING));
}
if(config->Simu_4D_Mode == 0){
for(ii=0; ii<ct->Nbr_voxels; ii++) PG_accumulation[ii] += Tot_scoring.PG_particles[ii] * norm_factor;
}
else{
deformed = Image_deformation(Tot_scoring.PG_particles, ct->GridSize, ct->VoxelLength, ct->Origin, Fields->Phase2Ref[a], Fields->GridSize, Fields->Spacing, Fields->Origin);
for(ii=0; ii<ct->Nbr_voxels; ii++) PG_accumulation[ii] += deformed[ii] * norm_factor;
free(deformed);
}
for(ii=0; ii<config->PG_Spectrum_NumBin; ii++) PG_Spectrum_accumulation[ii] += Tot_scoring.PG_spectrum[ii];
if(a == (config->Num_4DCT_phases-1) && config->Current_fraction == plan->NumberOfFractions){
export_results = 1;
if(Tot_scoring.PG_particles != NULL) free(Tot_scoring.PG_particles);
if(Tot_scoring.PG_spectrum != NULL) free(Tot_scoring.PG_spectrum);
Tot_scoring.PG_particles = PG_accumulation;
Tot_scoring.PG_spectrum = PG_Spectrum_accumulation;
}
}
if(config->Score_LET == 1){
if(a == 0 && (config->Current_fraction == 1 || config->Fraction_accumulation == 0)) LET_accumulation = (VAR_SCORING*)calloc(ct->Nbr_voxels, sizeof(VAR_SCORING));
if(config->Simu_4D_Mode == 0){
for(ii=0; ii<ct->Nbr_voxels; ii++) LET_accumulation[ii] += Tot_scoring.LET[ii] * norm_factor;
}
else{
deformed = Image_deformation(Tot_scoring.LET, ct->GridSize, ct->VoxelLength, ct->Origin, Fields->Phase2Ref[a], Fields->GridSize, Fields->Spacing, Fields->Origin);
for(ii=0; ii<ct->Nbr_voxels; ii++) LET_accumulation[ii] += deformed[ii] * norm_factor;
free(deformed);
}
if(a == (config->Num_4DCT_phases-1) && config->Current_fraction == plan->NumberOfFractions){
export_results = 1;
if(Tot_scoring.LET != NULL) free(Tot_scoring.LET);
Tot_scoring.LET = LET_accumulation;
}
}
}
// Export results
// Convert the dose in Gray units for DVH calculation
// 1 eV = 1.602176e-19 J
VAR_SCORING DoseScaling = 1.602176e-19 * 1000 * Beamlet->cumulative_weight * Beamlet->NumberOfFractions;
if(export_results == 1){
sprintf(output_beamlet_suffix, "_Beamlet_%d_%d_%d", b, c, d);
if(config->Simu_4D_Mode == 0 || config->Dose_4D_Accumulation == 1){
output_4D_suffix[0] = '\0';
}
else{ // 4D mode
sprintf(output_4D_suffix, "_Phase%d", a+1);
}
if(config->Compute_DVH == 1){
#pragma omp critical (Outputs)
{
strcpy(config->output_beamlet_suffix, output_beamlet_suffix);
strcpy(config->output_4D_suffix, output_4D_suffix);
config->Current_4D_phase = a;
compute_all_DVH(config, Tot_scoring.dose, DoseScaling);
}
}
if(config->Energy_ASCII_Output == 1){
strcpy(file_path, config->Output_Directory);
strcat(file_path, "Energy");
strcat(file_path, config->output_robustness_suffix);
strcat(file_path, output_beamlet_suffix);
strcat(file_path, output_4D_suffix);
strcat(file_path, ".dat");
export_dose_ascii(file_path, ct->GridSize, Tot_scoring.energy);
}
if(config->Energy_MHD_Output == 1){
strcpy(file_path, config->Output_Directory);
strcat(file_path, "Energy");
strcat(file_path, config->output_robustness_suffix);
strcat(file_path, output_beamlet_suffix);
strcat(file_path, output_4D_suffix);
strcat(file_path, ".mhd");
export_MHD_image(file_path, ct->GridSize, ct->VoxelLength, Tot_scoring.energy);
}
if(config->Dose_ASCII_Output == 1){
strcpy(file_path, config->Output_Directory);
strcat(file_path, "Dose");
strcat(file_path, config->output_robustness_suffix);
strcat(file_path, output_beamlet_suffix);
strcat(file_path, output_4D_suffix);
strcat(file_path, ".dat");
export_dose_ascii(file_path, ct->GridSize, Tot_scoring.dose);
}
if(config->Dose_MHD_Output == 1){
strcpy(file_path, config->Output_Directory);
strcat(file_path, "Dose");
strcat(file_path, config->output_robustness_suffix);
strcat(file_path, output_beamlet_suffix);
strcat(file_path, output_4D_suffix);
strcat(file_path, ".mhd");
export_MHD_image(file_path, ct->GridSize, ct->VoxelLength, Tot_scoring.dose);
}
if(config->LET_ASCII_Output == 1){ // export_results is already known to be 1 inside this block
strcpy(file_path, config->Output_Directory);
strcat(file_path, "LET");
strcat(file_path, config->output_robustness_suffix);
strcat(file_path, output_beamlet_suffix);
strcat(file_path, output_4D_suffix);
strcat(file_path, ".dat");
export_dose_ascii(file_path, ct->GridSize, Tot_scoring.LET);
}
if(config->LET_MHD_Output == 1){
strcpy(file_path, config->Output_Directory);
strcat(file_path, "LET");
strcat(file_path, config->output_robustness_suffix);
strcat(file_path, output_beamlet_suffix);
strcat(file_path, output_4D_suffix);
strcat(file_path, ".mhd");
export_MHD_image(file_path, ct->GridSize, ct->VoxelLength, Tot_scoring.LET);
}
if(config->Score_PromptGammas == 1){
strcpy(file_path, config->Output_Directory);
strcat(file_path, "PromptGamma");
strcat(file_path, config->output_robustness_suffix);
strcat(file_path, output_beamlet_suffix);
strcat(file_path, output_4D_suffix);
strcat(file_path, ".dat");
export_PG_ascii(file_path, ct->GridSize, Tot_scoring.PG_particles);
strcpy(file_path, config->Output_Directory);
strcat(file_path, "PromptGamma_spectrum");
strcat(file_path, config->output_robustness_suffix);
strcat(file_path, output_beamlet_suffix);
strcat(file_path, output_4D_suffix);
strcat(file_path, ".dat");
export_PG_spectrum_ascii(file_path, config->PG_Spectrum_NumBin, config->PG_Spectrum_Binning, Tot_scoring.PG_spectrum);
}
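// Sparse outputs are written per beamlet under <Output_Directory>/tmp/Beamlet_<n>/
// and merged into a single file per quantity after the parallel section below.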
if(config->Energy_Sparse_Output == 1){
strcpy(file_path, config->Output_Directory);
strcat(file_path, "tmp/");
CreateDir(file_path);
sprintf(file_path, "%sBeamlet_%d/", file_path, current_spot+1);
CreateDir(file_path);
strcat(file_path, "Sparse_Energy");
strcat(file_path, config->output_robustness_suffix);
strcat(file_path, output_4D_suffix);
strcat(file_path, ".txt");
if(config->Simu_4D_Mode == 1 && config->Dose_4D_Accumulation == 0){
#pragma omp critical (Outputs)
{
strcpy(config->output_beamlet_suffix, output_beamlet_suffix);
strcpy(config->output_4D_suffix, output_4D_suffix);
config->Current_4D_phase = a;
export_Sparse_image(file_path, config, ct, Beamlet, Tot_scoring.energy, config->Energy_Sparse_Threshold);
}
}
else export_Sparse_image(file_path, config, ct, Beamlet, Tot_scoring.energy, config->Energy_Sparse_Threshold);
}
if(config->Dose_Sparse_Output == 1){
strcpy(file_path, config->Output_Directory);
strcat(file_path, "tmp/");
CreateDir(file_path);
sprintf(file_path, "%sBeamlet_%d/", file_path, current_spot+1);
CreateDir(file_path);
strcat(file_path, "Sparse_Dose");
strcat(file_path, config->output_robustness_suffix);
strcat(file_path, output_4D_suffix);
strcat(file_path, ".txt");
if(config->Simu_4D_Mode == 1 && config->Dose_4D_Accumulation == 0){
#pragma omp critical (Outputs)
{
strcpy(config->output_beamlet_suffix, output_beamlet_suffix);
strcpy(config->output_4D_suffix, output_4D_suffix);
config->Current_4D_phase = a;
export_Sparse_image(file_path, config, ct, Beamlet, Tot_scoring.dose, config->Dose_Sparse_Threshold);
}
}
else export_Sparse_image(file_path, config, ct, Beamlet, Tot_scoring.dose, config->Dose_Sparse_Threshold);
}
if(config->LET_Sparse_Output == 1){
strcpy(file_path, config->Output_Directory);
strcat(file_path, "tmp/");
CreateDir(file_path);
sprintf(file_path, "%sBeamlet_%d/", file_path, current_spot+1);
CreateDir(file_path);
strcat(file_path, "Sparse_LET");
strcat(file_path, config->output_robustness_suffix);
strcat(file_path, output_4D_suffix);
strcat(file_path, ".txt");
if(config->Simu_4D_Mode == 1 && config->Dose_4D_Accumulation == 0){
#pragma omp critical (Outputs)
{
strcpy(config->output_beamlet_suffix, output_beamlet_suffix);
strcpy(config->output_4D_suffix, output_4D_suffix);
config->Current_4D_phase = a;
export_Sparse_image(file_path, config, ct, Beamlet, Tot_scoring.LET, config->LET_Sparse_Threshold);
}
}
else export_Sparse_image(file_path, config, ct, Beamlet, Tot_scoring.LET, config->LET_Sparse_Threshold);
}
} // end export_results == 1
#pragma omp critical (Display)
{
time_end = omp_get_wtime();
if(config->Simu_4D_Mode == 0) printf("\nSimulation of beamlet %d/%d %s \n", current_spot+1, config->TotalNbrSpots, config->output_robustness_suffix);
else printf("\nSimulation of beamlet %d/%d Phase%d %s \n", current_spot+1, config->TotalNbrSpots, a+1, config->output_robustness_suffix);
printf("MC computation time: %f s \n", (time_MC-time_init));
printf("Output computation time: %f s \n", (time_end-time_MC));
}
// Delete dynamic variables
Free_Scoring(&Tot_scoring);
} // end 4D phases loop
Free_Plan_Parameters(Beamlet);
} // end Parallelisation
#pragma omp barrier
if(config->Energy_Sparse_Output == 1 || config->Dose_Sparse_Output == 1){
char InPath[200], InFile[200], OutPath[200];
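// Merge the per-beamlet sparse files written above into one file per quantity
// and per 4D phase, e.g. (illustrative): tmp/Beamlet_1/Sparse_Dose.txt,
// tmp/Beamlet_2/Sparse_Dose.txt, ... -> Sparse_Dose.txt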
if(config->Simu_4D_Mode == 0 || config->Dose_4D_Accumulation == 1) tot_phases = 1;
else tot_phases = config->Num_4DCT_phases;
int aa;
for(aa=0; aa <tot_phases; aa++){
if(config->Simu_4D_Mode == 0 || config->Dose_4D_Accumulation == 1){
config->output_4D_suffix[0] = '\0';
}
else{ // 4D mode
sprintf(config->output_4D_suffix, "_Phase%d", aa+1);
config->Current_4D_phase = aa;
}
if(config->Energy_Sparse_Output == 1){
sprintf(InPath, "%stmp/Beamlet_", config->Output_Directory);
sprintf(InFile, "Sparse_Energy%s%s.txt", config->output_robustness_suffix, config->output_4D_suffix);
sprintf(OutPath, "%sSparse_Energy%s%s.txt", config->Output_Directory, config->output_robustness_suffix, config->output_4D_suffix);
Merge_Sparse_Files(InPath, InFile, config->TotalNbrSpots, OutPath);
}
if(config->Dose_Sparse_Output == 1){
sprintf(InPath, "%stmp/Beamlet_", config->Output_Directory);
sprintf(InFile, "Sparse_Dose%s%s.txt", config->output_robustness_suffix, config->output_4D_suffix);
sprintf(OutPath, "%sSparse_Dose%s%s.txt", config->Output_Directory, config->output_robustness_suffix, config->output_4D_suffix);
Merge_Sparse_Files(InPath, InFile, config->TotalNbrSpots, OutPath);
}
if(config->LET_Sparse_Output == 1){
sprintf(InPath, "%stmp/Beamlet_", config->Output_Directory);
sprintf(InFile, "Sparse_LET%s%s.txt", config->output_robustness_suffix, config->output_4D_suffix);
sprintf(OutPath, "%sSparse_LET%s%s.txt", config->Output_Directory, config->output_robustness_suffix, config->output_4D_suffix);
Merge_Sparse_Files(InPath, InFile, config->TotalNbrSpots, OutPath);
}
}
}
}
|
bt-long.c | typedef signed char __int8_t;
typedef unsigned char __uint8_t;
typedef short __int16_t;
typedef unsigned short __uint16_t;
typedef int __int32_t;
typedef unsigned int __uint32_t;
typedef long long __int64_t;
typedef unsigned long long __uint64_t;
typedef long __darwin_intptr_t;
typedef unsigned int __darwin_natural_t;
typedef int __darwin_ct_rune_t;
union stUn_imopVarPre0 {
char __mbstate8[128];
long long _mbstateL;
} ;
typedef union stUn_imopVarPre0 __mbstate_t;
typedef __mbstate_t __darwin_mbstate_t;
typedef long int __darwin_ptrdiff_t;
typedef long unsigned int __darwin_size_t;
typedef __builtin_va_list __darwin_va_list;
typedef int __darwin_wchar_t;
typedef __darwin_wchar_t __darwin_rune_t;
typedef int __darwin_wint_t;
typedef unsigned long __darwin_clock_t;
typedef __uint32_t __darwin_socklen_t;
typedef long __darwin_ssize_t;
typedef long __darwin_time_t;
typedef __int64_t __darwin_blkcnt_t;
typedef __int32_t __darwin_blksize_t;
typedef __int32_t __darwin_dev_t;
typedef unsigned int __darwin_fsblkcnt_t;
typedef unsigned int __darwin_fsfilcnt_t;
typedef __uint32_t __darwin_gid_t;
typedef __uint32_t __darwin_id_t;
typedef __uint64_t __darwin_ino64_t;
typedef __darwin_ino64_t __darwin_ino_t;
typedef __darwin_natural_t __darwin_mach_port_name_t;
typedef __darwin_mach_port_name_t __darwin_mach_port_t;
typedef __uint16_t __darwin_mode_t;
typedef __int64_t __darwin_off_t;
typedef __int32_t __darwin_pid_t;
typedef __uint32_t __darwin_sigset_t;
typedef __int32_t __darwin_suseconds_t;
typedef __uint32_t __darwin_uid_t;
typedef __uint32_t __darwin_useconds_t;
typedef unsigned char __darwin_uuid_t[16];
typedef char __darwin_uuid_string_t[37];
struct __darwin_pthread_handler_rec {
void ( *__routine )(void *);
void *__arg;
struct __darwin_pthread_handler_rec *__next;
} ;
struct _opaque_pthread_attr_t {
long __sig;
char __opaque[56];
} ;
struct _opaque_pthread_cond_t {
long __sig;
char __opaque[40];
} ;
struct _opaque_pthread_condattr_t {
long __sig;
char __opaque[8];
} ;
struct _opaque_pthread_mutex_t {
long __sig;
char __opaque[56];
} ;
struct _opaque_pthread_mutexattr_t {
long __sig;
char __opaque[8];
} ;
struct _opaque_pthread_once_t {
long __sig;
char __opaque[8];
} ;
struct _opaque_pthread_rwlock_t {
long __sig;
char __opaque[192];
} ;
struct _opaque_pthread_rwlockattr_t {
long __sig;
char __opaque[16];
} ;
struct _opaque_pthread_t {
long __sig;
struct __darwin_pthread_handler_rec *__cleanup_stack;
char __opaque[8176];
} ;
typedef struct _opaque_pthread_attr_t __darwin_pthread_attr_t;
typedef struct _opaque_pthread_cond_t __darwin_pthread_cond_t;
typedef struct _opaque_pthread_condattr_t __darwin_pthread_condattr_t;
typedef unsigned long __darwin_pthread_key_t;
typedef struct _opaque_pthread_mutex_t __darwin_pthread_mutex_t;
typedef struct _opaque_pthread_mutexattr_t __darwin_pthread_mutexattr_t;
typedef struct _opaque_pthread_once_t __darwin_pthread_once_t;
typedef struct _opaque_pthread_rwlock_t __darwin_pthread_rwlock_t;
typedef struct _opaque_pthread_rwlockattr_t __darwin_pthread_rwlockattr_t;
typedef struct _opaque_pthread_t *__darwin_pthread_t;
typedef int __darwin_nl_item;
typedef int __darwin_wctrans_t;
typedef __uint32_t __darwin_wctype_t;
typedef __darwin_va_list va_list;
typedef __darwin_size_t size_t;
typedef __darwin_off_t fpos_t;
struct __sbuf {
unsigned char *_base;
int _size;
} ;
struct __sFILEX ;
struct __sFILE {
unsigned char *_p;
int _r;
int _w;
short _flags;
short _file;
struct __sbuf _bf;
int _lbfsize;
void *_cookie;
int ( *_close )(void *);
int ( *_read )(void *, char * , int );
fpos_t ( *_seek )(void *, fpos_t , int );
int ( *_write )(void *, const char * , int );
struct __sbuf _ub;
struct __sFILEX *_extra;
int _ur;
unsigned char _ubuf[3];
unsigned char _nbuf[1];
struct __sbuf _lb;
int _blksize;
fpos_t _offset;
} ;
typedef struct __sFILE FILE;
int fclose(FILE *);
int fgetc(FILE *);
FILE *fopen(const char *restrict __filename, const char *restrict __mode);
int fscanf(FILE *restrict , const char *restrict , ...);
int printf(const char *restrict , ...);
typedef __darwin_off_t off_t;
typedef __darwin_ssize_t ssize_t;
enum enum_imopVarPre1 {
P_ALL, P_PID , P_PGID
} ;
typedef enum enum_imopVarPre1 idtype_t;
typedef __darwin_pid_t pid_t;
typedef __darwin_id_t id_t;
typedef int sig_atomic_t;
struct __darwin_i386_thread_state {
unsigned int __eax;
unsigned int __ebx;
unsigned int __ecx;
unsigned int __edx;
unsigned int __edi;
unsigned int __esi;
unsigned int __ebp;
unsigned int __esp;
unsigned int __ss;
unsigned int __eflags;
unsigned int __eip;
unsigned int __cs;
unsigned int __ds;
unsigned int __es;
unsigned int __fs;
unsigned int __gs;
} ;
struct __darwin_fp_control {
unsigned short __invalid: 1, __denorm: 1 , __zdiv: 1 , __ovrfl: 1 , __undfl: 1 , __precis: 1 , :2 , __pc: 2 , __rc: 2 , :1 , :3;
} ;
typedef struct __darwin_fp_control __darwin_fp_control_t;
struct __darwin_fp_status {
unsigned short __invalid: 1, __denorm: 1 , __zdiv: 1 , __ovrfl: 1 , __undfl: 1 , __precis: 1 , __stkflt: 1 , __errsumm: 1 , __c0: 1 , __c1: 1 , __c2: 1 , __tos: 3 , __c3: 1 , __busy: 1;
} ;
typedef struct __darwin_fp_status __darwin_fp_status_t;
struct __darwin_mmst_reg {
char __mmst_reg[10];
char __mmst_rsrv[6];
} ;
struct __darwin_xmm_reg {
char __xmm_reg[16];
} ;
struct __darwin_i386_float_state {
int __fpu_reserved[2];
struct __darwin_fp_control __fpu_fcw;
struct __darwin_fp_status __fpu_fsw;
__uint8_t __fpu_ftw;
__uint8_t __fpu_rsrv1;
__uint16_t __fpu_fop;
__uint32_t __fpu_ip;
__uint16_t __fpu_cs;
__uint16_t __fpu_rsrv2;
__uint32_t __fpu_dp;
__uint16_t __fpu_ds;
__uint16_t __fpu_rsrv3;
__uint32_t __fpu_mxcsr;
__uint32_t __fpu_mxcsrmask;
struct __darwin_mmst_reg __fpu_stmm0;
struct __darwin_mmst_reg __fpu_stmm1;
struct __darwin_mmst_reg __fpu_stmm2;
struct __darwin_mmst_reg __fpu_stmm3;
struct __darwin_mmst_reg __fpu_stmm4;
struct __darwin_mmst_reg __fpu_stmm5;
struct __darwin_mmst_reg __fpu_stmm6;
struct __darwin_mmst_reg __fpu_stmm7;
struct __darwin_xmm_reg __fpu_xmm0;
struct __darwin_xmm_reg __fpu_xmm1;
struct __darwin_xmm_reg __fpu_xmm2;
struct __darwin_xmm_reg __fpu_xmm3;
struct __darwin_xmm_reg __fpu_xmm4;
struct __darwin_xmm_reg __fpu_xmm5;
struct __darwin_xmm_reg __fpu_xmm6;
struct __darwin_xmm_reg __fpu_xmm7;
char __fpu_rsrv4[14 * 16];
int __fpu_reserved1;
} ;
struct __darwin_i386_avx_state {
int __fpu_reserved[2];
struct __darwin_fp_control __fpu_fcw;
struct __darwin_fp_status __fpu_fsw;
__uint8_t __fpu_ftw;
__uint8_t __fpu_rsrv1;
__uint16_t __fpu_fop;
__uint32_t __fpu_ip;
__uint16_t __fpu_cs;
__uint16_t __fpu_rsrv2;
__uint32_t __fpu_dp;
__uint16_t __fpu_ds;
__uint16_t __fpu_rsrv3;
__uint32_t __fpu_mxcsr;
__uint32_t __fpu_mxcsrmask;
struct __darwin_mmst_reg __fpu_stmm0;
struct __darwin_mmst_reg __fpu_stmm1;
struct __darwin_mmst_reg __fpu_stmm2;
struct __darwin_mmst_reg __fpu_stmm3;
struct __darwin_mmst_reg __fpu_stmm4;
struct __darwin_mmst_reg __fpu_stmm5;
struct __darwin_mmst_reg __fpu_stmm6;
struct __darwin_mmst_reg __fpu_stmm7;
struct __darwin_xmm_reg __fpu_xmm0;
struct __darwin_xmm_reg __fpu_xmm1;
struct __darwin_xmm_reg __fpu_xmm2;
struct __darwin_xmm_reg __fpu_xmm3;
struct __darwin_xmm_reg __fpu_xmm4;
struct __darwin_xmm_reg __fpu_xmm5;
struct __darwin_xmm_reg __fpu_xmm6;
struct __darwin_xmm_reg __fpu_xmm7;
char __fpu_rsrv4[14 * 16];
int __fpu_reserved1;
char __avx_reserved1[64];
struct __darwin_xmm_reg __fpu_ymmh0;
struct __darwin_xmm_reg __fpu_ymmh1;
struct __darwin_xmm_reg __fpu_ymmh2;
struct __darwin_xmm_reg __fpu_ymmh3;
struct __darwin_xmm_reg __fpu_ymmh4;
struct __darwin_xmm_reg __fpu_ymmh5;
struct __darwin_xmm_reg __fpu_ymmh6;
struct __darwin_xmm_reg __fpu_ymmh7;
} ;
struct __darwin_i386_exception_state {
__uint16_t __trapno;
__uint16_t __cpu;
__uint32_t __err;
__uint32_t __faultvaddr;
} ;
struct __darwin_x86_debug_state32 {
unsigned int __dr0;
unsigned int __dr1;
unsigned int __dr2;
unsigned int __dr3;
unsigned int __dr4;
unsigned int __dr5;
unsigned int __dr6;
unsigned int __dr7;
} ;
struct __darwin_x86_thread_state64 {
__uint64_t __rax;
__uint64_t __rbx;
__uint64_t __rcx;
__uint64_t __rdx;
__uint64_t __rdi;
__uint64_t __rsi;
__uint64_t __rbp;
__uint64_t __rsp;
__uint64_t __r8;
__uint64_t __r9;
__uint64_t __r10;
__uint64_t __r11;
__uint64_t __r12;
__uint64_t __r13;
__uint64_t __r14;
__uint64_t __r15;
__uint64_t __rip;
__uint64_t __rflags;
__uint64_t __cs;
__uint64_t __fs;
__uint64_t __gs;
} ;
struct __darwin_x86_float_state64 {
int __fpu_reserved[2];
struct __darwin_fp_control __fpu_fcw;
struct __darwin_fp_status __fpu_fsw;
__uint8_t __fpu_ftw;
__uint8_t __fpu_rsrv1;
__uint16_t __fpu_fop;
__uint32_t __fpu_ip;
__uint16_t __fpu_cs;
__uint16_t __fpu_rsrv2;
__uint32_t __fpu_dp;
__uint16_t __fpu_ds;
__uint16_t __fpu_rsrv3;
__uint32_t __fpu_mxcsr;
__uint32_t __fpu_mxcsrmask;
struct __darwin_mmst_reg __fpu_stmm0;
struct __darwin_mmst_reg __fpu_stmm1;
struct __darwin_mmst_reg __fpu_stmm2;
struct __darwin_mmst_reg __fpu_stmm3;
struct __darwin_mmst_reg __fpu_stmm4;
struct __darwin_mmst_reg __fpu_stmm5;
struct __darwin_mmst_reg __fpu_stmm6;
struct __darwin_mmst_reg __fpu_stmm7;
struct __darwin_xmm_reg __fpu_xmm0;
struct __darwin_xmm_reg __fpu_xmm1;
struct __darwin_xmm_reg __fpu_xmm2;
struct __darwin_xmm_reg __fpu_xmm3;
struct __darwin_xmm_reg __fpu_xmm4;
struct __darwin_xmm_reg __fpu_xmm5;
struct __darwin_xmm_reg __fpu_xmm6;
struct __darwin_xmm_reg __fpu_xmm7;
struct __darwin_xmm_reg __fpu_xmm8;
struct __darwin_xmm_reg __fpu_xmm9;
struct __darwin_xmm_reg __fpu_xmm10;
struct __darwin_xmm_reg __fpu_xmm11;
struct __darwin_xmm_reg __fpu_xmm12;
struct __darwin_xmm_reg __fpu_xmm13;
struct __darwin_xmm_reg __fpu_xmm14;
struct __darwin_xmm_reg __fpu_xmm15;
char __fpu_rsrv4[6 * 16];
int __fpu_reserved1;
} ;
struct __darwin_x86_avx_state64 {
int __fpu_reserved[2];
struct __darwin_fp_control __fpu_fcw;
struct __darwin_fp_status __fpu_fsw;
__uint8_t __fpu_ftw;
__uint8_t __fpu_rsrv1;
__uint16_t __fpu_fop;
__uint32_t __fpu_ip;
__uint16_t __fpu_cs;
__uint16_t __fpu_rsrv2;
__uint32_t __fpu_dp;
__uint16_t __fpu_ds;
__uint16_t __fpu_rsrv3;
__uint32_t __fpu_mxcsr;
__uint32_t __fpu_mxcsrmask;
struct __darwin_mmst_reg __fpu_stmm0;
struct __darwin_mmst_reg __fpu_stmm1;
struct __darwin_mmst_reg __fpu_stmm2;
struct __darwin_mmst_reg __fpu_stmm3;
struct __darwin_mmst_reg __fpu_stmm4;
struct __darwin_mmst_reg __fpu_stmm5;
struct __darwin_mmst_reg __fpu_stmm6;
struct __darwin_mmst_reg __fpu_stmm7;
struct __darwin_xmm_reg __fpu_xmm0;
struct __darwin_xmm_reg __fpu_xmm1;
struct __darwin_xmm_reg __fpu_xmm2;
struct __darwin_xmm_reg __fpu_xmm3;
struct __darwin_xmm_reg __fpu_xmm4;
struct __darwin_xmm_reg __fpu_xmm5;
struct __darwin_xmm_reg __fpu_xmm6;
struct __darwin_xmm_reg __fpu_xmm7;
struct __darwin_xmm_reg __fpu_xmm8;
struct __darwin_xmm_reg __fpu_xmm9;
struct __darwin_xmm_reg __fpu_xmm10;
struct __darwin_xmm_reg __fpu_xmm11;
struct __darwin_xmm_reg __fpu_xmm12;
struct __darwin_xmm_reg __fpu_xmm13;
struct __darwin_xmm_reg __fpu_xmm14;
struct __darwin_xmm_reg __fpu_xmm15;
char __fpu_rsrv4[6 * 16];
int __fpu_reserved1;
char __avx_reserved1[64];
struct __darwin_xmm_reg __fpu_ymmh0;
struct __darwin_xmm_reg __fpu_ymmh1;
struct __darwin_xmm_reg __fpu_ymmh2;
struct __darwin_xmm_reg __fpu_ymmh3;
struct __darwin_xmm_reg __fpu_ymmh4;
struct __darwin_xmm_reg __fpu_ymmh5;
struct __darwin_xmm_reg __fpu_ymmh6;
struct __darwin_xmm_reg __fpu_ymmh7;
struct __darwin_xmm_reg __fpu_ymmh8;
struct __darwin_xmm_reg __fpu_ymmh9;
struct __darwin_xmm_reg __fpu_ymmh10;
struct __darwin_xmm_reg __fpu_ymmh11;
struct __darwin_xmm_reg __fpu_ymmh12;
struct __darwin_xmm_reg __fpu_ymmh13;
struct __darwin_xmm_reg __fpu_ymmh14;
struct __darwin_xmm_reg __fpu_ymmh15;
} ;
struct __darwin_x86_exception_state64 {
__uint16_t __trapno;
__uint16_t __cpu;
__uint32_t __err;
__uint64_t __faultvaddr;
} ;
struct __darwin_x86_debug_state64 {
__uint64_t __dr0;
__uint64_t __dr1;
__uint64_t __dr2;
__uint64_t __dr3;
__uint64_t __dr4;
__uint64_t __dr5;
__uint64_t __dr6;
__uint64_t __dr7;
} ;
struct __darwin_mcontext32 {
struct __darwin_i386_exception_state __es;
struct __darwin_i386_thread_state __ss;
struct __darwin_i386_float_state __fs;
} ;
struct __darwin_mcontext_avx32 {
struct __darwin_i386_exception_state __es;
struct __darwin_i386_thread_state __ss;
struct __darwin_i386_avx_state __fs;
} ;
struct __darwin_mcontext64 {
struct __darwin_x86_exception_state64 __es;
struct __darwin_x86_thread_state64 __ss;
struct __darwin_x86_float_state64 __fs;
} ;
struct __darwin_mcontext_avx64 {
struct __darwin_x86_exception_state64 __es;
struct __darwin_x86_thread_state64 __ss;
struct __darwin_x86_avx_state64 __fs;
} ;
typedef struct __darwin_mcontext64 *mcontext_t;
typedef __darwin_pthread_attr_t pthread_attr_t;
struct __darwin_sigaltstack {
void *ss_sp;
__darwin_size_t ss_size;
int ss_flags;
} ;
typedef struct __darwin_sigaltstack stack_t;
struct __darwin_ucontext {
int uc_onstack;
__darwin_sigset_t uc_sigmask;
struct __darwin_sigaltstack uc_stack;
struct __darwin_ucontext *uc_link;
__darwin_size_t uc_mcsize;
struct __darwin_mcontext64 *uc_mcontext;
} ;
typedef struct __darwin_ucontext ucontext_t;
typedef __darwin_sigset_t sigset_t;
typedef __darwin_uid_t uid_t;
union sigval {
int sival_int;
void *sival_ptr;
} ;
struct sigevent {
int sigev_notify;
int sigev_signo;
union sigval sigev_value;
void ( *sigev_notify_function )(union sigval );
pthread_attr_t *sigev_notify_attributes;
} ;
struct __siginfo {
int si_signo;
int si_errno;
int si_code;
pid_t si_pid;
uid_t si_uid;
int si_status;
void *si_addr;
union sigval si_value;
long si_band;
unsigned long __pad[7];
} ;
typedef struct __siginfo siginfo_t;
union __sigaction_u {
void ( *__sa_handler )(int );
void ( *__sa_sigaction )(int , struct __siginfo * , void *);
} ;
struct __sigaction {
union __sigaction_u __sigaction_u;
void ( *sa_tramp )(void *, int , int , siginfo_t * , void *);
sigset_t sa_mask;
int sa_flags;
} ;
struct sigaction {
union __sigaction_u __sigaction_u;
sigset_t sa_mask;
int sa_flags;
} ;
typedef void ( *sig_t )(int );
struct sigvec {
void ( *sv_handler )(int );
int sv_mask;
int sv_flags;
} ;
struct sigstack {
char *ss_sp;
int ss_onstack;
} ;
typedef signed char int8_t;
typedef short int16_t;
typedef int int32_t;
typedef long long int64_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned long long uint64_t;
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
typedef __darwin_intptr_t intptr_t;
typedef unsigned long uintptr_t;
typedef long int intmax_t;
typedef long unsigned int uintmax_t;
struct timeval {
__darwin_time_t tv_sec;
__darwin_suseconds_t tv_usec;
} ;
typedef __uint64_t rlim_t;
struct rusage {
struct timeval ru_utime;
struct timeval ru_stime;
long ru_maxrss;
long ru_ixrss;
long ru_idrss;
long ru_isrss;
long ru_minflt;
long ru_majflt;
long ru_nswap;
long ru_inblock;
long ru_oublock;
long ru_msgsnd;
long ru_msgrcv;
long ru_nsignals;
long ru_nvcsw;
long ru_nivcsw;
} ;
typedef void *rusage_info_t;
struct rusage_info_v0 {
uint8_t ri_uuid[16];
uint64_t ri_user_time;
uint64_t ri_system_time;
uint64_t ri_pkg_idle_wkups;
uint64_t ri_interrupt_wkups;
uint64_t ri_pageins;
uint64_t ri_wired_size;
uint64_t ri_resident_size;
uint64_t ri_phys_footprint;
uint64_t ri_proc_start_abstime;
uint64_t ri_proc_exit_abstime;
} ;
struct rusage_info_v1 {
uint8_t ri_uuid[16];
uint64_t ri_user_time;
uint64_t ri_system_time;
uint64_t ri_pkg_idle_wkups;
uint64_t ri_interrupt_wkups;
uint64_t ri_pageins;
uint64_t ri_wired_size;
uint64_t ri_resident_size;
uint64_t ri_phys_footprint;
uint64_t ri_proc_start_abstime;
uint64_t ri_proc_exit_abstime;
uint64_t ri_child_user_time;
uint64_t ri_child_system_time;
uint64_t ri_child_pkg_idle_wkups;
uint64_t ri_child_interrupt_wkups;
uint64_t ri_child_pageins;
uint64_t ri_child_elapsed_abstime;
} ;
struct rusage_info_v2 {
uint8_t ri_uuid[16];
uint64_t ri_user_time;
uint64_t ri_system_time;
uint64_t ri_pkg_idle_wkups;
uint64_t ri_interrupt_wkups;
uint64_t ri_pageins;
uint64_t ri_wired_size;
uint64_t ri_resident_size;
uint64_t ri_phys_footprint;
uint64_t ri_proc_start_abstime;
uint64_t ri_proc_exit_abstime;
uint64_t ri_child_user_time;
uint64_t ri_child_system_time;
uint64_t ri_child_pkg_idle_wkups;
uint64_t ri_child_interrupt_wkups;
uint64_t ri_child_pageins;
uint64_t ri_child_elapsed_abstime;
uint64_t ri_diskio_bytesread;
uint64_t ri_diskio_byteswritten;
} ;
struct rusage_info_v3 {
uint8_t ri_uuid[16];
uint64_t ri_user_time;
uint64_t ri_system_time;
uint64_t ri_pkg_idle_wkups;
uint64_t ri_interrupt_wkups;
uint64_t ri_pageins;
uint64_t ri_wired_size;
uint64_t ri_resident_size;
uint64_t ri_phys_footprint;
uint64_t ri_proc_start_abstime;
uint64_t ri_proc_exit_abstime;
uint64_t ri_child_user_time;
uint64_t ri_child_system_time;
uint64_t ri_child_pkg_idle_wkups;
uint64_t ri_child_interrupt_wkups;
uint64_t ri_child_pageins;
uint64_t ri_child_elapsed_abstime;
uint64_t ri_diskio_bytesread;
uint64_t ri_diskio_byteswritten;
uint64_t ri_cpu_time_qos_default;
uint64_t ri_cpu_time_qos_maintenance;
uint64_t ri_cpu_time_qos_background;
uint64_t ri_cpu_time_qos_utility;
uint64_t ri_cpu_time_qos_legacy;
uint64_t ri_cpu_time_qos_user_initiated;
uint64_t ri_cpu_time_qos_user_interactive;
uint64_t ri_billed_system_time;
uint64_t ri_serviced_system_time;
} ;
typedef struct rusage_info_v3 rusage_info_current;
struct rlimit {
rlim_t rlim_cur;
rlim_t rlim_max;
} ;
struct proc_rlimit_control_wakeupmon {
uint32_t wm_flags;
int32_t wm_rate;
} ;
union wait {
int w_status;
struct stUn_imopVarPre2 {
unsigned int w_Termsig: 7, w_Coredump: 1 , w_Retcode: 8 , w_Filler: 16;
} w_T;
struct stUn_imopVarPre3 {
unsigned int w_Stopval: 8, w_Stopsig: 8 , w_Filler: 16;
} w_S;
} ;
typedef __darwin_ct_rune_t ct_rune_t;
typedef __darwin_rune_t rune_t;
typedef __darwin_wchar_t wchar_t;
struct stUn_imopVarPre4 {
int quot;
int rem;
} ;
typedef struct stUn_imopVarPre4 div_t;
struct stUn_imopVarPre5 {
long quot;
long rem;
} ;
typedef struct stUn_imopVarPre5 ldiv_t;
struct stUn_imopVarPre6 {
long long quot;
long long rem;
} ;
typedef struct stUn_imopVarPre6 lldiv_t;
void exit(int );
typedef unsigned char u_int8_t;
typedef unsigned short u_int16_t;
typedef unsigned int u_int32_t;
typedef unsigned long long u_int64_t;
typedef int64_t register_t;
typedef u_int64_t user_addr_t;
typedef u_int64_t user_size_t;
typedef int64_t user_ssize_t;
typedef int64_t user_long_t;
typedef u_int64_t user_ulong_t;
typedef int64_t user_time_t;
typedef int64_t user_off_t;
typedef u_int64_t syscall_arg_t;
typedef __darwin_dev_t dev_t;
typedef __darwin_mode_t mode_t;
typedef float float_t;
typedef double double_t;
extern double fabs(double );
extern double sqrt(double );
struct __float2 {
float __sinval;
float __cosval;
} ;
struct __double2 {
double __sinval;
double __cosval;
} ;
struct exception {
int type;
char *name;
double arg1;
double arg2;
double retval;
} ;
typedef int boolean;
struct stUn_imopVarPre11 {
double real;
double imag;
} ;
typedef struct stUn_imopVarPre11 dcomplex;
extern void timer_clear(int );
extern void timer_start(int );
extern void timer_stop(int );
extern double timer_read(int );
extern void c_print_results(char *name, char class , int n1 , int n2 , int n3 , int niter , int nthreads , double t , double mops , char *optype , int passed_verification , char *npbversion , char *compiletime , char *cc , char *clink , char *c_lib , char *c_inc , char *cflags , char *clinkflags , char *rand);
static int grid_points[3];
static double tx1;
static double tx2;
static double tx3;
static double ty1;
static double ty2;
static double ty3;
static double tz1;
static double tz2;
static double tz3;
static double dx1;
static double dx2;
static double dx3;
static double dx4;
static double dx5;
static double dy1;
static double dy2;
static double dy3;
static double dy4;
static double dy5;
static double dz1;
static double dz2;
static double dz3;
static double dz4;
static double dz5;
static double dssp;
static double dt;
static double ce[5][13];
static double dxmax;
static double dymax;
static double dzmax;
static double xxcon1;
static double xxcon2;
static double xxcon3;
static double xxcon4;
static double xxcon5;
static double dx1tx1;
static double dx2tx1;
static double dx3tx1;
static double dx4tx1;
static double dx5tx1;
static double yycon1;
static double yycon2;
static double yycon3;
static double yycon4;
static double yycon5;
static double dy1ty1;
static double dy2ty1;
static double dy3ty1;
static double dy4ty1;
static double dy5ty1;
static double zzcon1;
static double zzcon2;
static double zzcon3;
static double zzcon4;
static double zzcon5;
static double dz1tz1;
static double dz2tz1;
static double dz3tz1;
static double dz4tz1;
static double dz5tz1;
static double dnxm1;
static double dnym1;
static double dnzm1;
static double c1c2;
static double c1c5;
static double c3c4;
static double c1345;
static double conz1;
static double c1;
static double c2;
static double c3;
static double c4;
static double c5;
static double c4dssp;
static double c5dssp;
static double dtdssp;
static double dttx1;
static double dttx2;
static double dtty1;
static double dtty2;
static double dttz1;
static double dttz2;
static double c2dttx1;
static double c2dtty1;
static double c2dttz1;
static double comz1;
static double comz4;
static double comz5;
static double comz6;
static double c3c4tx3;
static double c3c4ty3;
static double c3c4tz3;
static double c2iv;
static double con43;
static double con16;
static double us[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1];
static double vs[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1];
static double ws[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1];
static double qs[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1];
static double rho_i[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1];
static double square[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1];
static double forcing[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5 + 1];
static double u[(12 + 1) / 2 * 2 + 1][(12 + 1) / 2 * 2 + 1][(12 + 1) / 2 * 2 + 1][5];
static double rhs[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
static double lhs[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1][3][5][5];
static double cuf[12];
static double q[12];
static double ue[12][5];
static double buf[12][5];
#pragma omp threadprivate(cuf, q, ue, buf)
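/* cuf, q, ue and buf are per-thread scratch buffers for the initialization
   and forcing sweeps below; threadprivate gives each OpenMP thread its own
   copy, so the worksharing loops can use them without synchronization. */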
static double fjac[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 - 1 + 1][5][5];
static double njac[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 - 1 + 1][5][5];
static double tmp1;
static double tmp2;
static double tmp3;
static void add(void );
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_solution(double xi, double eta , double zeta , double dtemp[5]);
static void set_constants(void );
static void matvec_sub(double ablock[5][5], double avec[5] , double bvec[5]);
static void matmul_sub(double ablock[5][5], double bblock[5][5] , double cblock[5][5]);
static void binvcrhs(double lhs[5][5], double c[5][5] , double r[5]);
static void binvrhs(double lhs[5][5], double r[5]);
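/* NAS Parallel Benchmarks BT (block-tridiagonal) solver, OpenMP C version.
   The static arrays above are compiled for a fixed 12x12x12 grid; the
   optional inputbt.data file may change niter, dt and the grid dimensions,
   but main() aborts if any dimension exceeds the compiled size of 12. */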
int main(int argc, char **argv) {
int step;
int n3;
int nthreads = 1;
double navg;
int no_time_steps;
char *class_imopVar19;
int *verified_imopVar20;
double xcrref[5];
double xceref[5];
double xcrdif[5];
double xcedif[5];
double epsilon;
double xce[5];
double xcr[5];
double dtref;
int m_imopVar18;
double mflops;
double tmax;
boolean verified;
char class;
int *_imopVarPre175;
char *_imopVarPre176;
int niter;
#pragma omp parallel
{
FILE *fp;
#pragma omp master
{
printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - BT Benchmark\n\n");
fp = fopen("inputbt.data", "r");
if (fp != ((void *) 0)) {
printf(" Reading from input file inputbt.data");
int *_imopVarPre145;
_imopVarPre145 = &niter;
fscanf(fp, "%d", _imopVarPre145);
int _imopVarPre147;
_imopVarPre147 = fgetc(fp);
while (_imopVarPre147 != '\n') {
;
_imopVarPre147 = fgetc(fp);
}
double *_imopVarPre149;
_imopVarPre149 = &dt;
fscanf(fp, "%lg", _imopVarPre149);
int _imopVarPre151;
_imopVarPre151 = fgetc(fp);
while (_imopVarPre151 != '\n') {
;
_imopVarPre151 = fgetc(fp);
}
int *_imopVarPre155;
int *_imopVarPre156;
int *_imopVarPre157;
_imopVarPre155 = &grid_points[2];
_imopVarPre156 = &grid_points[1];
_imopVarPre157 = &grid_points[0];
fscanf(fp, "%d%d%d", _imopVarPre157, _imopVarPre156, _imopVarPre155);
fclose(fp);
} else {
printf(" No input file inputbt.data. Using compiled defaults\n");
niter = 60;
dt = 0.010;
grid_points[0] = 12;
grid_points[1] = 12;
grid_points[2] = 12;
}
}
int _imopVarPre161;
int _imopVarPre162;
int _imopVarPre163;
#pragma omp master
{
_imopVarPre161 = grid_points[2];
_imopVarPre162 = grid_points[1];
_imopVarPre163 = grid_points[0];
printf(" Size: %3dx%3dx%3d\n", _imopVarPre163, _imopVarPre162, _imopVarPre161);
printf(" Iterations: %3d dt: %10.6f\n", niter, dt);
}
int _imopVarPre164;
int _imopVarPre165;
#pragma omp master
{
_imopVarPre164 = grid_points[0] > 12;
if (!_imopVarPre164) {
_imopVarPre165 = grid_points[1] > 12;
if (!_imopVarPre165) {
_imopVarPre165 = grid_points[2] > 12;
}
_imopVarPre164 = _imopVarPre165;
}
if (_imopVarPre164) {
int _imopVarPre169;
int _imopVarPre170;
int _imopVarPre171;
_imopVarPre169 = grid_points[2];
_imopVarPre170 = grid_points[1];
_imopVarPre171 = grid_points[0];
printf(" %dx%dx%d\n", _imopVarPre171, _imopVarPre170, _imopVarPre169);
printf(" Problem size too big for compiled array sizes\n");
exit(1);
}
set_constants();
}
int i;
int j;
int k;
int m;
int ix;
int iy;
int iz;
double xi;
double eta;
double zeta;
double Pface[2][3][5];
double Pxi;
double Peta;
double Pzeta;
double temp[5];
#pragma omp for nowait
for (i = 0; i < 12; i++) {
for (j = 0; j < 12; j++) {
for (k = 0; k < 12; k++) {
for (m = 0; m < 5; m++) {
u[i][j][k][m] = 1.0;
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
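/* Initialize the interior of u by transfinite (inclusion-exclusion)
   interpolation of the exact solution evaluated on the six grid faces:
   u = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + Pxi*Peta*Pzeta,
   where each P* linearly blends the two opposing faces in that direction.
   The boundary loops that follow then overwrite the six faces exactly. */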
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
for (ix = 0; ix < 2; ix++) {
double *_imopVarPre191;
double _imopVarPre192;
_imopVarPre191 = &(Pface[ix][0][0]);
_imopVarPre192 = (double) ix;
exact_solution(_imopVarPre192, eta, zeta, _imopVarPre191);
}
for (iy = 0; iy < 2; iy++) {
double *_imopVarPre195;
double _imopVarPre196;
_imopVarPre195 = &Pface[iy][1][0];
_imopVarPre196 = (double) iy;
exact_solution(xi, _imopVarPre196, zeta, _imopVarPre195);
}
for (iz = 0; iz < 2; iz++) {
double *_imopVarPre199;
double _imopVarPre200;
_imopVarPre199 = &Pface[iz][2][0];
_imopVarPre200 = (double) iz;
exact_solution(xi, eta, _imopVarPre200, _imopVarPre199);
}
for (m = 0; m < 5; m++) {
Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m];
u[i][j][k][m] = Pxi + Peta + Pzeta - Pxi * Peta - Pxi * Pzeta - Peta * Pzeta + Pxi * Peta * Pzeta;
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
i = 0;
xi = 0.0;
#pragma omp for nowait
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
i = grid_points[0] - 1;
xi = 1.0;
#pragma omp for nowait
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
j = 0;
eta = 0.0;
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
j = grid_points[1] - 1;
eta = 1.0;
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
k = 0;
zeta = 0.0;
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
k = grid_points[2] - 1;
zeta = 1.0;
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
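/* lhsinit: zero all three 5x5 block bands of lhs, then set the diagonal
   band to the identity, so boundary cells reduce to rhs = I * solution. */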
{
int i;
int j;
int k;
int m;
int n;
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
for (n = 0; n < 5; n++) {
lhs[i][j][k][0][m][n] = 0.0;
lhs[i][j][k][1][m][n] = 0.0;
lhs[i][j][k][2][m][n] = 0.0;
}
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
lhs[i][j][k][1][m][m] = 1.0;
}
}
}
}
}
{
double dtemp[5];
double xi;
double eta;
double zeta;
double dtpp;
int m;
int i;
int j;
int k;
int ip1;
int im1;
int jp1;
int jm1;
int km1;
int kp1;
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
forcing[i][j][k][m] = 0.0;
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
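/* Forcing term: evaluate the steady-state residual of the exact solution in
   each coordinate direction (convective and viscous terms plus the 4th-order
   dissipation), then negate it in the final loop, so that the exact solution
   becomes a steady solution of the forced equations. */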
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
eta = (double) j * dnym1;
for (k = 1; k < grid_points[2] - 1; k++) {
zeta = (double) k * dnzm1;
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[i][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
for (m = 1; m <= 4; m++) {
buf[i][m] = dtpp * dtemp[m];
}
cuf[i] = buf[i][1] * buf[i][1];
buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
q[i] = 0.5 * (buf[i][1] * ue[i][1] + buf[i][2] * ue[i][2] + buf[i][3] * ue[i][3]);
}
for (i = 1; i < grid_points[0] - 1; i++) {
im1 = i - 1;
ip1 = i + 1;
forcing[i][j][k][0] = forcing[i][j][k][0] - tx2 * (ue[ip1][1] - ue[im1][1]) + dx1tx1 * (ue[ip1][0] - 2.0 * ue[i][0] + ue[im1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] - tx2 * ((ue[ip1][1] * buf[ip1][1] + c2 * (ue[ip1][4] - q[ip1])) - (ue[im1][1] * buf[im1][1] + c2 * (ue[im1][4] - q[im1]))) + xxcon1 * (buf[ip1][1] - 2.0 * buf[i][1] + buf[im1][1]) + dx2tx1 * (ue[ip1][1] - 2.0 * ue[i][1] + ue[im1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] - tx2 * (ue[ip1][2] * buf[ip1][1] - ue[im1][2] * buf[im1][1]) + xxcon2 * (buf[ip1][2] - 2.0 * buf[i][2] + buf[im1][2]) + dx3tx1 * (ue[ip1][2] - 2.0 * ue[i][2] + ue[im1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] - tx2 * (ue[ip1][3] * buf[ip1][1] - ue[im1][3] * buf[im1][1]) + xxcon2 * (buf[ip1][3] - 2.0 * buf[i][3] + buf[im1][3]) + dx4tx1 * (ue[ip1][3] - 2.0 * ue[i][3] + ue[im1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] - tx2 * (buf[ip1][1] * (c1 * ue[ip1][4] - c2 * q[ip1]) - buf[im1][1] * (c1 * ue[im1][4] - c2 * q[im1])) + 0.5 * xxcon3 * (buf[ip1][0] - 2.0 * buf[i][0] + buf[im1][0]) + xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) + xxcon5 * (buf[ip1][4] - 2.0 * buf[i][4] + buf[im1][4]) + dx5tx1 * (ue[ip1][4] - 2.0 * ue[i][4] + ue[im1][4]);
}
for (m = 0; m < 5; m++) {
i = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
i = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (-4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
}
for (m = 0; m < 5; m++) {
for (i = 1 * 3; i <= grid_points[0] - 3 * 1 - 1; i++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
}
}
for (m = 0; m < 5; m++) {
i = grid_points[0] - 3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m]);
i = grid_points[0] - 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 5.0 * ue[i][m]);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
xi = (double) i * dnxm1;
for (k = 1; k < grid_points[2] - 1; k++) {
zeta = (double) k * dnzm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[j][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
for (m = 1; m <= 4; m++) {
buf[j][m] = dtpp * dtemp[m];
}
cuf[j] = buf[j][2] * buf[j][2];
buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
q[j] = 0.5 * (buf[j][1] * ue[j][1] + buf[j][2] * ue[j][2] + buf[j][3] * ue[j][3]);
}
for (j = 1; j < grid_points[1] - 1; j++) {
jm1 = j - 1;
jp1 = j + 1;
forcing[i][j][k][0] = forcing[i][j][k][0] - ty2 * (ue[jp1][2] - ue[jm1][2]) + dy1ty1 * (ue[jp1][0] - 2.0 * ue[j][0] + ue[jm1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] - ty2 * (ue[jp1][1] * buf[jp1][2] - ue[jm1][1] * buf[jm1][2]) + yycon2 * (buf[jp1][1] - 2.0 * buf[j][1] + buf[jm1][1]) + dy2ty1 * (ue[jp1][1] - 2.0 * ue[j][1] + ue[jm1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] - ty2 * ((ue[jp1][2] * buf[jp1][2] + c2 * (ue[jp1][4] - q[jp1])) - (ue[jm1][2] * buf[jm1][2] + c2 * (ue[jm1][4] - q[jm1]))) + yycon1 * (buf[jp1][2] - 2.0 * buf[j][2] + buf[jm1][2]) + dy3ty1 * (ue[jp1][2] - 2.0 * ue[j][2] + ue[jm1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] - ty2 * (ue[jp1][3] * buf[jp1][2] - ue[jm1][3] * buf[jm1][2]) + yycon2 * (buf[jp1][3] - 2.0 * buf[j][3] + buf[jm1][3]) + dy4ty1 * (ue[jp1][3] - 2.0 * ue[j][3] + ue[jm1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] - ty2 * (buf[jp1][2] * (c1 * ue[jp1][4] - c2 * q[jp1]) - buf[jm1][2] * (c1 * ue[jm1][4] - c2 * q[jm1])) + 0.5 * yycon3 * (buf[jp1][0] - 2.0 * buf[j][0] + buf[jm1][0]) + yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) + yycon5 * (buf[jp1][4] - 2.0 * buf[j][4] + buf[jm1][4]) + dy5ty1 * (ue[jp1][4] - 2.0 * ue[j][4] + ue[jm1][4]);
}
for (m = 0; m < 5; m++) {
j = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
j = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (-4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
}
for (m = 0; m < 5; m++) {
for (j = 1 * 3; j <= grid_points[1] - 3 * 1 - 1; j++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
}
}
for (m = 0; m < 5; m++) {
j = grid_points[1] - 3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m]);
j = grid_points[1] - 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 5.0 * ue[j][m]);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
xi = (double) i * dnxm1;
for (j = 1; j < grid_points[1] - 1; j++) {
eta = (double) j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[k][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
for (m = 1; m <= 4; m++) {
buf[k][m] = dtpp * dtemp[m];
}
cuf[k] = buf[k][3] * buf[k][3];
buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
q[k] = 0.5 * (buf[k][1] * ue[k][1] + buf[k][2] * ue[k][2] + buf[k][3] * ue[k][3]);
}
for (k = 1; k < grid_points[2] - 1; k++) {
km1 = k - 1;
kp1 = k + 1;
forcing[i][j][k][0] = forcing[i][j][k][0] - tz2 * (ue[kp1][3] - ue[km1][3]) + dz1tz1 * (ue[kp1][0] - 2.0 * ue[k][0] + ue[km1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] - tz2 * (ue[kp1][1] * buf[kp1][3] - ue[km1][1] * buf[km1][3]) + zzcon2 * (buf[kp1][1] - 2.0 * buf[k][1] + buf[km1][1]) + dz2tz1 * (ue[kp1][1] - 2.0 * ue[k][1] + ue[km1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] - tz2 * (ue[kp1][2] * buf[kp1][3] - ue[km1][2] * buf[km1][3]) + zzcon2 * (buf[kp1][2] - 2.0 * buf[k][2] + buf[km1][2]) + dz3tz1 * (ue[kp1][2] - 2.0 * ue[k][2] + ue[km1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] - tz2 * ((ue[kp1][3] * buf[kp1][3] + c2 * (ue[kp1][4] - q[kp1])) - (ue[km1][3] * buf[km1][3] + c2 * (ue[km1][4] - q[km1]))) + zzcon1 * (buf[kp1][3] - 2.0 * buf[k][3] + buf[km1][3]) + dz4tz1 * (ue[kp1][3] - 2.0 * ue[k][3] + ue[km1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] - tz2 * (buf[kp1][3] * (c1 * ue[kp1][4] - c2 * q[kp1]) - buf[km1][3] * (c1 * ue[km1][4] - c2 * q[km1])) + 0.5 * zzcon3 * (buf[kp1][0] - 2.0 * buf[k][0] + buf[km1][0]) + zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) + zzcon5 * (buf[kp1][4] - 2.0 * buf[k][4] + buf[km1][4]) + dz5tz1 * (ue[kp1][4] - 2.0 * ue[k][4] + ue[km1][4]);
}
for (m = 0; m < 5; m++) {
k = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (5.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
k = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (-4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
}
for (m = 0; m < 5; m++) {
for (k = 1 * 3; k <= grid_points[2] - 3 * 1 - 1; k++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
}
}
for (m = 0; m < 5; m++) {
k = grid_points[2] - 3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m]);
k = grid_points[2] - 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 5.0 * ue[k][m]);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];
}
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
{
int i;
int j;
int k;
int m;
double rho_inv;
double uijk;
double up1;
double um1;
double vijk;
double vp1;
double vm1;
double wijk;
double wp1;
double wm1;
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
rho_inv = 1.0 / u[i][j][k][0];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[i][j][k][1] * rho_inv;
vs[i][j][k] = u[i][j][k][2] * rho_inv;
ws[i][j][k] = u[i][j][k][3] * rho_inv;
square[i][j][k] = 0.5 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
}
}
}
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = forcing[i][j][k][m];
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
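/* x-direction fluxes: central differences for the convective (tx2) and
   viscous (dx*tx1, xxcon*) terms; matching y- and z-sweeps follow, and a
   boundary-adjusted 4th-order artificial dissipation (dssp) is added after
   each sweep. */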
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
uijk = us[i][j][k];
up1 = us[i + 1][j][k];
um1 = us[i - 1][j][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * (u[i + 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i - 1][j][k][0]) - tx2 * (u[i + 1][j][k][1] - u[i - 1][j][k][1]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * (u[i + 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i - 1][j][k][1]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[i + 1][j][k][1] * up1 - u[i - 1][j][k][1] * um1 + (u[i + 1][j][k][4] - square[i + 1][j][k] - u[i - 1][j][k][4] + square[i - 1][j][k]) * c2);
rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * (u[i + 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i - 1][j][k][2]) + xxcon2 * (vs[i + 1][j][k] - 2.0 * vs[i][j][k] + vs[i - 1][j][k]) - tx2 * (u[i + 1][j][k][2] * up1 - u[i - 1][j][k][2] * um1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * (u[i + 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i - 1][j][k][3]) + xxcon2 * (ws[i + 1][j][k] - 2.0 * ws[i][j][k] + ws[i - 1][j][k]) - tx2 * (u[i + 1][j][k][3] * up1 - u[i - 1][j][k][3] * um1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * (u[i + 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i - 1][j][k][4]) + xxcon3 * (qs[i + 1][j][k] - 2.0 * qs[i][j][k] + qs[i - 1][j][k]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[i + 1][j][k][4] * rho_i[i + 1][j][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i - 1][j][k][4] * rho_i[i - 1][j][k]) - tx2 * ((c1 * u[i + 1][j][k][4] - c2 * square[i + 1][j][k]) * up1 - (c1 * u[i - 1][j][k][4] - c2 * square[i - 1][j][k]) * um1);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
i = 1;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);
}
}
}
i = 2;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);
}
}
}
#pragma omp for nowait
for (i = 3; i < grid_points[0] - 3; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);
}
}
}
}
i = grid_points[0] - 3;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m]);
}
}
}
i = grid_points[0] - 2;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4. * u[i - 1][j][k][m] + 5.0 * u[i][j][k][m]);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
vijk = vs[i][j][k];
vp1 = vs[i][j + 1][k];
vm1 = vs[i][j - 1][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * (u[i][j + 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j - 1][k][0]) - ty2 * (u[i][j + 1][k][2] - u[i][j - 1][k][2]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * (u[i][j + 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j - 1][k][1]) + yycon2 * (us[i][j + 1][k] - 2.0 * us[i][j][k] + us[i][j - 1][k]) - ty2 * (u[i][j + 1][k][1] * vp1 - u[i][j - 1][k][1] * vm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * (u[i][j + 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j - 1][k][2]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[i][j + 1][k][2] * vp1 - u[i][j - 1][k][2] * vm1 + (u[i][j + 1][k][4] - square[i][j + 1][k] - u[i][j - 1][k][4] + square[i][j - 1][k]) * c2);
rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * (u[i][j + 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j - 1][k][3]) + yycon2 * (ws[i][j + 1][k] - 2.0 * ws[i][j][k] + ws[i][j - 1][k]) - ty2 * (u[i][j + 1][k][3] * vp1 - u[i][j - 1][k][3] * vm1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * (u[i][j + 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j - 1][k][4]) + yycon3 * (qs[i][j + 1][k] - 2.0 * qs[i][j][k] + qs[i][j - 1][k]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[i][j + 1][k][4] * rho_i[i][j + 1][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j - 1][k][4] * rho_i[i][j - 1][k]) - ty2 * ((c1 * u[i][j + 1][k][4] - c2 * square[i][j + 1][k]) * vp1 - (c1 * u[i][j - 1][k][4] - c2 * square[i][j - 1][k]) * vm1);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
j = 1;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);
}
}
}
j = 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 3; j < grid_points[1] - 3; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);
}
}
}
}
j = grid_points[1] - 3;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m]);
}
}
}
j = grid_points[1] - 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4. * u[i][j - 1][k][m] + 5. * u[i][j][k][m]);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
wijk = ws[i][j][k];
wp1 = ws[i][j][k + 1];
wm1 = ws[i][j][k - 1];
rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * (u[i][j][k + 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k - 1][0]) - tz2 * (u[i][j][k + 1][3] - u[i][j][k - 1][3]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * (u[i][j][k + 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k - 1][1]) + zzcon2 * (us[i][j][k + 1] - 2.0 * us[i][j][k] + us[i][j][k - 1]) - tz2 * (u[i][j][k + 1][1] * wp1 - u[i][j][k - 1][1] * wm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * (u[i][j][k + 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k - 1][2]) + zzcon2 * (vs[i][j][k + 1] - 2.0 * vs[i][j][k] + vs[i][j][k - 1]) - tz2 * (u[i][j][k + 1][2] * wp1 - u[i][j][k - 1][2] * wm1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * (u[i][j][k + 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k - 1][3]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[i][j][k + 1][3] * wp1 - u[i][j][k - 1][3] * wm1 + (u[i][j][k + 1][4] - square[i][j][k + 1] - u[i][j][k - 1][4] + square[i][j][k - 1]) * c2);
rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * (u[i][j][k + 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k - 1][4]) + zzcon3 * (qs[i][j][k + 1] - 2.0 * qs[i][j][k] + qs[i][j][k - 1]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[i][j][k + 1][4] * rho_i[i][j][k + 1] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j][k - 1][4] * rho_i[i][j][k - 1]) - tz2 * ((c1 * u[i][j][k + 1][4] - c2 * square[i][j][k + 1]) * wp1 - (c1 * u[i][j][k - 1][4] - c2 * square[i][j][k - 1]) * wm1);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
k = 1;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);
}
}
}
k = 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 3; k < grid_points[2] - 3; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);
}
}
}
}
k = grid_points[2] - 3;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m]);
}
}
}
k = grid_points[2] - 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 5.0 * u[i][j][k][m]);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
for (i = 1; i < grid_points[0] - 1; i++) {
rhs[i][j][k][m] = rhs[i][j][k][m] * dt;
}
}
}
}
}
{
int i;
int j;
int k;
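/* Build the block-tridiagonal system in the x-direction: fjac holds the 5x5
   convective flux Jacobian dF/dU and njac the viscous Jacobian; lhs couples
   each cell to its i-1 / i / i+1 neighbours through dt-scaled blocks. */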
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (i = 0; i < grid_points[0]; i++) {
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[i][j][k][0][0] = 0.0;
fjac[i][j][k][0][1] = 1.0;
fjac[i][j][k][0][2] = 0.0;
fjac[i][j][k][0][3] = 0.0;
fjac[i][j][k][0][4] = 0.0;
fjac[i][j][k][1][0] = -(u[i][j][k][1] * tmp2 * u[i][j][k][1]) + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][1][1] = (2.0 - c2) * (u[i][j][k][1] / u[i][j][k][0]);
fjac[i][j][k][1][2] = -c2 * (u[i][j][k][2] * tmp1);
fjac[i][j][k][1][3] = -c2 * (u[i][j][k][3] * tmp1);
fjac[i][j][k][1][4] = c2;
fjac[i][j][k][2][0] = -(u[i][j][k][1] * u[i][j][k][2]) * tmp2;
fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
fjac[i][j][k][2][3] = 0.0;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = -(u[i][j][k][1] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][3][2] = 0.0;
fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][3][4] = 0.0;
fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * (u[i][j][k][4] * tmp1)) * (u[i][j][k][1] * tmp1);
fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * (3.0 * u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][4][2] = -c2 * (u[i][j][k][2] * u[i][j][k][1]) * tmp2;
fjac[i][j][k][4][3] = -c2 * (u[i][j][k][3] * u[i][j][k][1]) * tmp2;
fjac[i][j][k][4][4] = c1 * (u[i][j][k][1] * tmp1);
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = -con43 * c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = con43 * c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = -c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = -c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = -(con43 * c3c4 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c3c4 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c3c4 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = (c3c4 - c1345) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = (c3c4 - c1345) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = c1345 * tmp1;
}
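// Assemble the three 5x5 blocks of the block-tridiagonal system in i:
// lhs[..][0] (sub-diagonal, built from cell i-1), lhs[..][1] (diagonal,
// built from cell i) and lhs[..][2] (super-diagonal, built from cell i+1).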
for (i = 1; i < grid_points[0] - 1; i++) {
tmp1 = dt * tx1;
tmp2 = dt * tx2;
lhs[i][j][k][0][0][0] = -tmp2 * fjac[i - 1][j][k][0][0] - tmp1 * njac[i - 1][j][k][0][0] - tmp1 * dx1;
lhs[i][j][k][0][0][1] = -tmp2 * fjac[i - 1][j][k][0][1] - tmp1 * njac[i - 1][j][k][0][1];
lhs[i][j][k][0][0][2] = -tmp2 * fjac[i - 1][j][k][0][2] - tmp1 * njac[i - 1][j][k][0][2];
lhs[i][j][k][0][0][3] = -tmp2 * fjac[i - 1][j][k][0][3] - tmp1 * njac[i - 1][j][k][0][3];
lhs[i][j][k][0][0][4] = -tmp2 * fjac[i - 1][j][k][0][4] - tmp1 * njac[i - 1][j][k][0][4];
lhs[i][j][k][0][1][0] = -tmp2 * fjac[i - 1][j][k][1][0] - tmp1 * njac[i - 1][j][k][1][0];
lhs[i][j][k][0][1][1] = -tmp2 * fjac[i - 1][j][k][1][1] - tmp1 * njac[i - 1][j][k][1][1] - tmp1 * dx2;
lhs[i][j][k][0][1][2] = -tmp2 * fjac[i - 1][j][k][1][2] - tmp1 * njac[i - 1][j][k][1][2];
lhs[i][j][k][0][1][3] = -tmp2 * fjac[i - 1][j][k][1][3] - tmp1 * njac[i - 1][j][k][1][3];
lhs[i][j][k][0][1][4] = -tmp2 * fjac[i - 1][j][k][1][4] - tmp1 * njac[i - 1][j][k][1][4];
lhs[i][j][k][0][2][0] = -tmp2 * fjac[i - 1][j][k][2][0] - tmp1 * njac[i - 1][j][k][2][0];
lhs[i][j][k][0][2][1] = -tmp2 * fjac[i - 1][j][k][2][1] - tmp1 * njac[i - 1][j][k][2][1];
lhs[i][j][k][0][2][2] = -tmp2 * fjac[i - 1][j][k][2][2] - tmp1 * njac[i - 1][j][k][2][2] - tmp1 * dx3;
lhs[i][j][k][0][2][3] = -tmp2 * fjac[i - 1][j][k][2][3] - tmp1 * njac[i - 1][j][k][2][3];
lhs[i][j][k][0][2][4] = -tmp2 * fjac[i - 1][j][k][2][4] - tmp1 * njac[i - 1][j][k][2][4];
lhs[i][j][k][0][3][0] = -tmp2 * fjac[i - 1][j][k][3][0] - tmp1 * njac[i - 1][j][k][3][0];
lhs[i][j][k][0][3][1] = -tmp2 * fjac[i - 1][j][k][3][1] - tmp1 * njac[i - 1][j][k][3][1];
lhs[i][j][k][0][3][2] = -tmp2 * fjac[i - 1][j][k][3][2] - tmp1 * njac[i - 1][j][k][3][2];
lhs[i][j][k][0][3][3] = -tmp2 * fjac[i - 1][j][k][3][3] - tmp1 * njac[i - 1][j][k][3][3] - tmp1 * dx4;
lhs[i][j][k][0][3][4] = -tmp2 * fjac[i - 1][j][k][3][4] - tmp1 * njac[i - 1][j][k][3][4];
lhs[i][j][k][0][4][0] = -tmp2 * fjac[i - 1][j][k][4][0] - tmp1 * njac[i - 1][j][k][4][0];
lhs[i][j][k][0][4][1] = -tmp2 * fjac[i - 1][j][k][4][1] - tmp1 * njac[i - 1][j][k][4][1];
lhs[i][j][k][0][4][2] = -tmp2 * fjac[i - 1][j][k][4][2] - tmp1 * njac[i - 1][j][k][4][2];
lhs[i][j][k][0][4][3] = -tmp2 * fjac[i - 1][j][k][4][3] - tmp1 * njac[i - 1][j][k][4][3];
lhs[i][j][k][0][4][4] = -tmp2 * fjac[i - 1][j][k][4][4] - tmp1 * njac[i - 1][j][k][4][4] - tmp1 * dx5;
lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dx1;
lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dx2;
lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dx3;
lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dx4;
lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dx5;
lhs[i][j][k][2][0][0] = tmp2 * fjac[i + 1][j][k][0][0] - tmp1 * njac[i + 1][j][k][0][0] - tmp1 * dx1;
lhs[i][j][k][2][0][1] = tmp2 * fjac[i + 1][j][k][0][1] - tmp1 * njac[i + 1][j][k][0][1];
lhs[i][j][k][2][0][2] = tmp2 * fjac[i + 1][j][k][0][2] - tmp1 * njac[i + 1][j][k][0][2];
lhs[i][j][k][2][0][3] = tmp2 * fjac[i + 1][j][k][0][3] - tmp1 * njac[i + 1][j][k][0][3];
lhs[i][j][k][2][0][4] = tmp2 * fjac[i + 1][j][k][0][4] - tmp1 * njac[i + 1][j][k][0][4];
lhs[i][j][k][2][1][0] = tmp2 * fjac[i + 1][j][k][1][0] - tmp1 * njac[i + 1][j][k][1][0];
lhs[i][j][k][2][1][1] = tmp2 * fjac[i + 1][j][k][1][1] - tmp1 * njac[i + 1][j][k][1][1] - tmp1 * dx2;
lhs[i][j][k][2][1][2] = tmp2 * fjac[i + 1][j][k][1][2] - tmp1 * njac[i + 1][j][k][1][2];
lhs[i][j][k][2][1][3] = tmp2 * fjac[i + 1][j][k][1][3] - tmp1 * njac[i + 1][j][k][1][3];
lhs[i][j][k][2][1][4] = tmp2 * fjac[i + 1][j][k][1][4] - tmp1 * njac[i + 1][j][k][1][4];
lhs[i][j][k][2][2][0] = tmp2 * fjac[i + 1][j][k][2][0] - tmp1 * njac[i + 1][j][k][2][0];
lhs[i][j][k][2][2][1] = tmp2 * fjac[i + 1][j][k][2][1] - tmp1 * njac[i + 1][j][k][2][1];
lhs[i][j][k][2][2][2] = tmp2 * fjac[i + 1][j][k][2][2] - tmp1 * njac[i + 1][j][k][2][2] - tmp1 * dx3;
lhs[i][j][k][2][2][3] = tmp2 * fjac[i + 1][j][k][2][3] - tmp1 * njac[i + 1][j][k][2][3];
lhs[i][j][k][2][2][4] = tmp2 * fjac[i + 1][j][k][2][4] - tmp1 * njac[i + 1][j][k][2][4];
lhs[i][j][k][2][3][0] = tmp2 * fjac[i + 1][j][k][3][0] - tmp1 * njac[i + 1][j][k][3][0];
lhs[i][j][k][2][3][1] = tmp2 * fjac[i + 1][j][k][3][1] - tmp1 * njac[i + 1][j][k][3][1];
lhs[i][j][k][2][3][2] = tmp2 * fjac[i + 1][j][k][3][2] - tmp1 * njac[i + 1][j][k][3][2];
lhs[i][j][k][2][3][3] = tmp2 * fjac[i + 1][j][k][3][3] - tmp1 * njac[i + 1][j][k][3][3] - tmp1 * dx4;
lhs[i][j][k][2][3][4] = tmp2 * fjac[i + 1][j][k][3][4] - tmp1 * njac[i + 1][j][k][3][4];
lhs[i][j][k][2][4][0] = tmp2 * fjac[i + 1][j][k][4][0] - tmp1 * njac[i + 1][j][k][4][0];
lhs[i][j][k][2][4][1] = tmp2 * fjac[i + 1][j][k][4][1] - tmp1 * njac[i + 1][j][k][4][1];
lhs[i][j][k][2][4][2] = tmp2 * fjac[i + 1][j][k][4][2] - tmp1 * njac[i + 1][j][k][4][2];
lhs[i][j][k][2][4][3] = tmp2 * fjac[i + 1][j][k][4][3] - tmp1 * njac[i + 1][j][k][4][3];
lhs[i][j][k][2][4][4] = tmp2 * fjac[i + 1][j][k][4][4] - tmp1 * njac[i + 1][j][k][4][4] - tmp1 * dx5;
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
int i_imopVar9;
int j_imopVar10;
int k_imopVar11;
int isize;
isize = grid_points[0] - 1;
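// Block-Thomas solve along i: scale the first block row, sweep forward
// eliminating the sub-diagonal blocks, then back-substitute.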
#pragma omp for nowait
for (j_imopVar10 = 1; j_imopVar10 < grid_points[1] - 1; j_imopVar10++) {
for (k_imopVar11 = 1; k_imopVar11 < grid_points[2] - 1; k_imopVar11++) {
double ( *_imopVarPre338 );
double ( *_imopVarPre339 )[5];
double ( *_imopVarPre340 )[5];
_imopVarPre338 = rhs[0][j_imopVar10][k_imopVar11];
_imopVarPre339 = lhs[0][j_imopVar10][k_imopVar11][2];
_imopVarPre340 = lhs[0][j_imopVar10][k_imopVar11][1];
binvcrhs(_imopVarPre340, _imopVarPre339, _imopVarPre338);
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
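// Forward elimination over the interior cells. Per their use here (a
// hedged reading; the definitions live elsewhere in this program):
// matvec_sub(A, x, b) computes b -= A*x for a 5x5 block A and 5-vectors
// x, b; matmul_sub(A, B, C) computes C -= A*B for 5x5 blocks; and
// binvcrhs(B, C, r) left-multiplies C and r by B^-1, so the diagonal
// block of the current row becomes the identity.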
for (i_imopVar9 = 1; i_imopVar9 < isize; i_imopVar9++) {
#pragma omp for nowait
for (j_imopVar10 = 1; j_imopVar10 < grid_points[1] - 1; j_imopVar10++) {
for (k_imopVar11 = 1; k_imopVar11 < grid_points[2] - 1; k_imopVar11++) {
double ( *_imopVarPre344 );
double ( *_imopVarPre345 );
double ( *_imopVarPre346 )[5];
_imopVarPre344 = rhs[i_imopVar9][j_imopVar10][k_imopVar11];
_imopVarPre345 = rhs[i_imopVar9 - 1][j_imopVar10][k_imopVar11];
_imopVarPre346 = lhs[i_imopVar9][j_imopVar10][k_imopVar11][0];
matvec_sub(_imopVarPre346, _imopVarPre345, _imopVarPre344);
double ( *_imopVarPre350 )[5];
double ( *_imopVarPre351 )[5];
double ( *_imopVarPre352 )[5];
_imopVarPre350 = lhs[i_imopVar9][j_imopVar10][k_imopVar11][1];
_imopVarPre351 = lhs[i_imopVar9 - 1][j_imopVar10][k_imopVar11][2];
_imopVarPre352 = lhs[i_imopVar9][j_imopVar10][k_imopVar11][0];
matmul_sub(_imopVarPre352, _imopVarPre351, _imopVarPre350);
double ( *_imopVarPre356 );
double ( *_imopVarPre357 )[5];
double ( *_imopVarPre358 )[5];
_imopVarPre356 = rhs[i_imopVar9][j_imopVar10][k_imopVar11];
_imopVarPre357 = lhs[i_imopVar9][j_imopVar10][k_imopVar11][2];
_imopVarPre358 = lhs[i_imopVar9][j_imopVar10][k_imopVar11][1];
binvcrhs(_imopVarPre358, _imopVarPre357, _imopVarPre356);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, binvrhs, matvec_sub, lhs.f, j_imopVar10, matmul_sub, binvcrhs])
#pragma omp barrier
}
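// Last block row (i == isize): eliminate against row isize-1, then solve
// the remaining 5x5 system with binvrhs (there is no super-diagonal block
// at the boundary).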
#pragma omp for nowait
for (j_imopVar10 = 1; j_imopVar10 < grid_points[1] - 1; j_imopVar10++) {
for (k_imopVar11 = 1; k_imopVar11 < grid_points[2] - 1; k_imopVar11++) {
double ( *_imopVarPre362 );
double ( *_imopVarPre363 );
double ( *_imopVarPre364 )[5];
_imopVarPre362 = rhs[isize][j_imopVar10][k_imopVar11];
_imopVarPre363 = rhs[isize - 1][j_imopVar10][k_imopVar11];
_imopVarPre364 = lhs[isize][j_imopVar10][k_imopVar11][0];
matvec_sub(_imopVarPre364, _imopVarPre363, _imopVarPre362);
double ( *_imopVarPre368 )[5];
double ( *_imopVarPre369 )[5];
double ( *_imopVarPre370 )[5];
_imopVarPre368 = lhs[isize][j_imopVar10][k_imopVar11][1];
_imopVarPre369 = lhs[isize - 1][j_imopVar10][k_imopVar11][2];
_imopVarPre370 = lhs[isize][j_imopVar10][k_imopVar11][0];
matmul_sub(_imopVarPre370, _imopVarPre369, _imopVarPre368);
double ( *_imopVarPre373 );
double ( *_imopVarPre374 )[5];
_imopVarPre373 = rhs[isize][j_imopVar10][k_imopVar11]; // i_imopVar9 == isize here after the sweep above; isize is used for clarity
_imopVarPre374 = lhs[isize][j_imopVar10][k_imopVar11][1];
binvrhs(_imopVarPre374, _imopVarPre373);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, j_imopVar7, grid_points.f, grid_points, lhs, lhs.f])
#pragma omp barrier
int i_imopVar6;
int j_imopVar7;
int k_imopVar8;
int m;
int n;
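// Back substitution along i: rhs(i) -= C(i) * rhs(i+1), sweeping from
// i = grid_points[0]-2 down to 0; a barrier separates successive i levels
// because the inner j loop is workshared across threads.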
for (i_imopVar6 = grid_points[0] - 2; i_imopVar6 >= 0; i_imopVar6--) {
#pragma omp for nowait
for (j_imopVar7 = 1; j_imopVar7 < grid_points[1] - 1; j_imopVar7++) {
for (k_imopVar8 = 1; k_imopVar8 < grid_points[2] - 1; k_imopVar8++) {
for (m = 0; m < 5; m++) {
for (n = 0; n < 5; n++) {
rhs[i_imopVar6][j_imopVar7][k_imopVar8][m] = rhs[i_imopVar6][j_imopVar7][k_imopVar8][m] - lhs[i_imopVar6][j_imopVar7][k_imopVar8][2][m][n] * rhs[i_imopVar6 + 1][j_imopVar7][k_imopVar8][n];
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, rhs, j_imopVar7, grid_points.f, grid_points, lhs, lhs.f])
#pragma omp barrier
}
}
// #pragma omp dummyFlush BARRIER_START written([]) read([u, u.f, fjac.f, tmp3, fjac, tmp1, grid_points.f, tmp2, grid_points, c3c4, njac, njac.f, con43, i_imopVar3, c1345, c2, c1])
#pragma omp barrier
{
int i_imopVar3;
int j_imopVar4;
int k_imopVar5;
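// y-direction implicit system: the same Jacobian evaluation as in x, with
// the roles of the momentum components u[..][1..3] permuted so that the
// v-momentum u[..][2] takes the convective role.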
#pragma omp for nowait
for (i_imopVar3 = 1; i_imopVar3 < grid_points[0] - 1; i_imopVar3++) {
for (j_imopVar4 = 0; j_imopVar4 < grid_points[1]; j_imopVar4++) {
for (k_imopVar5 = 1; k_imopVar5 < grid_points[2] - 1; k_imopVar5++) {
tmp1 = 1.0 / u[i_imopVar3][j_imopVar4][k_imopVar5][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][0][0] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][0][1] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][0][2] = 1.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][0][3] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][0][4] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][1][0] = -(u[i_imopVar3][j_imopVar4][k_imopVar5][1] * u[i_imopVar3][j_imopVar4][k_imopVar5][2]) * tmp2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][1][1] = u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][1][2] = u[i_imopVar3][j_imopVar4][k_imopVar5][1] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][1][3] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][1][4] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][2][0] = -(u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp2) + 0.50 * c2 * ((u[i_imopVar3][j_imopVar4][k_imopVar5][1] * u[i_imopVar3][j_imopVar4][k_imopVar5][1] + u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][2] + u[i_imopVar3][j_imopVar4][k_imopVar5][3] * u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * tmp2);
fjac[i_imopVar3][j_imopVar4][k_imopVar5][2][1] = -c2 * u[i_imopVar3][j_imopVar4][k_imopVar5][1] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][2][2] = (2.0 - c2) * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][2][3] = -c2 * u[i_imopVar3][j_imopVar4][k_imopVar5][3] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][2][4] = c2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][3][0] = -(u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * tmp2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][3][1] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][3][2] = u[i_imopVar3][j_imopVar4][k_imopVar5][3] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][3][3] = u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][3][4] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][4][0] = (c2 * (u[i_imopVar3][j_imopVar4][k_imopVar5][1] * u[i_imopVar3][j_imopVar4][k_imopVar5][1] + u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][2] + u[i_imopVar3][j_imopVar4][k_imopVar5][3] * u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * tmp2 - c1 * u[i_imopVar3][j_imopVar4][k_imopVar5][4] * tmp1) * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][4][1] = -c2 * u[i_imopVar3][j_imopVar4][k_imopVar5][1] * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][4][2] = c1 * u[i_imopVar3][j_imopVar4][k_imopVar5][4] * tmp1 - 0.50 * c2 * ((u[i_imopVar3][j_imopVar4][k_imopVar5][1] * u[i_imopVar3][j_imopVar4][k_imopVar5][1] + 3.0 * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][2] + u[i_imopVar3][j_imopVar4][k_imopVar5][3] * u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * tmp2);
fjac[i_imopVar3][j_imopVar4][k_imopVar5][4][3] = -c2 * (u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * tmp2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][4][4] = c1 * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp1;
njac[i_imopVar3][j_imopVar4][k_imopVar5][0][0] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][0][1] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][0][2] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][0][3] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][0][4] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][1][0] = -c3c4 * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][1];
njac[i_imopVar3][j_imopVar4][k_imopVar5][1][1] = c3c4 * tmp1;
njac[i_imopVar3][j_imopVar4][k_imopVar5][1][2] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][1][3] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][1][4] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][2][0] = -con43 * c3c4 * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][2];
njac[i_imopVar3][j_imopVar4][k_imopVar5][2][1] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][2][2] = con43 * c3c4 * tmp1;
njac[i_imopVar3][j_imopVar4][k_imopVar5][2][3] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][2][4] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][3][0] = -c3c4 * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][3];
njac[i_imopVar3][j_imopVar4][k_imopVar5][3][1] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][3][2] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][3][3] = c3c4 * tmp1;
njac[i_imopVar3][j_imopVar4][k_imopVar5][3][4] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][4][0] = -(c3c4 - c1345) * tmp3 * (((u[i_imopVar3][j_imopVar4][k_imopVar5][1]) * (u[i_imopVar3][j_imopVar4][k_imopVar5][1]))) - (con43 * c3c4 - c1345) * tmp3 * (((u[i_imopVar3][j_imopVar4][k_imopVar5][2]) * (u[i_imopVar3][j_imopVar4][k_imopVar5][2]))) - (c3c4 - c1345) * tmp3 * (((u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * (u[i_imopVar3][j_imopVar4][k_imopVar5][3]))) - c1345 * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][4];
njac[i_imopVar3][j_imopVar4][k_imopVar5][4][1] = (c3c4 - c1345) * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][1];
njac[i_imopVar3][j_imopVar4][k_imopVar5][4][2] = (con43 * c3c4 - c1345) * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][2];
njac[i_imopVar3][j_imopVar4][k_imopVar5][4][3] = (c3c4 - c1345) * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][3];
njac[i_imopVar3][j_imopVar4][k_imopVar5][4][4] = c1345 * tmp1;
}
}
}
// #pragma omp dummyFlush BARRIER_START written([fjac.f, tmp3, tmp1, tmp2, njac.f]) read([fjac.f, dt, fjac, tmp1, grid_points.f, tmp2, grid_points, dy5, njac, njac.f, ty2, ty1, lhs, dy2, lhs.f, i_imopVar3, dy1, dy4, dy3])
#pragma omp barrier
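// Assemble the sub-diagonal, diagonal and super-diagonal 5x5 blocks of the
// block-tridiagonal system in j from fjac and njac.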
#pragma omp for nowait
for (i_imopVar3 = 1; i_imopVar3 < grid_points[0] - 1; i_imopVar3++) {
for (j_imopVar4 = 1; j_imopVar4 < grid_points[1] - 1; j_imopVar4++) {
for (k_imopVar5 = 1; k_imopVar5 < grid_points[2] - 1; k_imopVar5++) {
tmp1 = dt * ty1;
tmp2 = dt * ty2;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][0][0] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][0] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][0] - tmp1 * dy1;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][0][1] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][1] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][0][2] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][2] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][0][3] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][3] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][0][4] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][4] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][1][0] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][0] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][1][1] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][1] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][1] - tmp1 * dy2;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][1][2] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][2] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][1][3] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][3] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][1][4] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][4] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][2][0] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][0] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][2][1] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][1] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][2][2] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][2] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][2] - tmp1 * dy3;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][2][3] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][3] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][2][4] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][4] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][3][0] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][0] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][3][1] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][1] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][3][2] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][2] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][3][3] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][3] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][3] - tmp1 * dy4;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][3][4] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][4] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][4][0] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][0] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][4][1] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][1] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][4][2] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][2] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][4][3] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][3] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][4][4] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][4] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][4] - tmp1 * dy5;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][0][0] + tmp1 * 2.0 * dy1;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][0][1] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][0][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][0][2] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][0][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][0][3] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][0][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][0][4] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][0][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][1][0] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][1][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][1][1] + tmp1 * 2.0 * dy2;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][1][2] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][1][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][1][3] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][1][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][1][4] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][1][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][2][0] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][2][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][2][1] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][2][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][2][2] + tmp1 * 2.0 * dy3;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][2][3] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][2][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][2][4] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][2][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][3][0] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][3][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][3][1] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][3][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][3][2] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][3][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][3][3] + tmp1 * 2.0 * dy4;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][3][4] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][3][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][4][0] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][4][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][4][1] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][4][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][4][2] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][4][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][4][3] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][4][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][4][4] + tmp1 * 2.0 * dy5;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][0][0] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][0] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][0] - tmp1 * dy1;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][0][1] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][1] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][0][2] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][2] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][0][3] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][3] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][0][4] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][4] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][1][0] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][0] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][1][1] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][1] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][1] - tmp1 * dy2;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][1][2] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][2] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][1][3] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][3] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][1][4] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][4] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][2][0] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][0] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][2][1] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][1] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][2][2] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][2] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][2] - tmp1 * dy3;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][2][3] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][3] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][2][4] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][4] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][3][0] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][0] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][3][1] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][1] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][3][2] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][2] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][3][3] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][3] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][3] - tmp1 * dy4;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][3][4] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][4] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][4][0] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][0] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][4][1] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][1] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][4][2] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][2] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][4][3] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][3] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][4][4] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][4] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][4] - tmp1 * dy5;
}
}
}
// #pragma omp dummyFlush BARRIER_START written([tmp1, tmp2, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, lhs.f, i_imopVar0, binvcrhs])
#pragma omp barrier
int i_imopVar0;
int j_imopVar1;
int k_imopVar2;
int jsize;
jsize = grid_points[1] - 1;
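// Block-Thomas solve along j, mirroring the i-direction sweep above:
// scale the j = 0 row, eliminate forward in j, handle the j = jsize
// boundary row, then back-substitute.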
#pragma omp for nowait
for (i_imopVar0 = 1; i_imopVar0 < grid_points[0] - 1; i_imopVar0++) {
for (k_imopVar2 = 1; k_imopVar2 < grid_points[2] - 1; k_imopVar2++) {
double ( *_imopVarPre378 );
double ( *_imopVarPre379 )[5];
double ( *_imopVarPre380 )[5];
_imopVarPre378 = rhs[i_imopVar0][0][k_imopVar2];
_imopVarPre379 = lhs[i_imopVar0][0][k_imopVar2][2];
_imopVarPre380 = lhs[i_imopVar0][0][k_imopVar2][1];
binvcrhs(_imopVarPre380, _imopVarPre379, _imopVarPre378);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, binvrhs, matvec_sub, lhs.f, i_imopVar0, matmul_sub, binvcrhs])
#pragma omp barrier
for (j_imopVar1 = 1; j_imopVar1 < jsize; j_imopVar1++) {
#pragma omp for nowait
for (i_imopVar0 = 1; i_imopVar0 < grid_points[0] - 1; i_imopVar0++) {
for (k_imopVar2 = 1; k_imopVar2 < grid_points[2] - 1; k_imopVar2++) {
double ( *_imopVarPre384 );
double ( *_imopVarPre385 );
double ( *_imopVarPre386 )[5];
_imopVarPre384 = rhs[i_imopVar0][j_imopVar1][k_imopVar2];
_imopVarPre385 = rhs[i_imopVar0][j_imopVar1 - 1][k_imopVar2];
_imopVarPre386 = lhs[i_imopVar0][j_imopVar1][k_imopVar2][0];
matvec_sub(_imopVarPre386, _imopVarPre385, _imopVarPre384);
double ( *_imopVarPre390 )[5];
double ( *_imopVarPre391 )[5];
double ( *_imopVarPre392 )[5];
_imopVarPre390 = lhs[i_imopVar0][j_imopVar1][k_imopVar2][1];
_imopVarPre391 = lhs[i_imopVar0][j_imopVar1 - 1][k_imopVar2][2];
_imopVarPre392 = lhs[i_imopVar0][j_imopVar1][k_imopVar2][0];
matmul_sub(_imopVarPre392, _imopVarPre391, _imopVarPre390);
double ( *_imopVarPre396 );
double ( *_imopVarPre397 )[5];
double ( *_imopVarPre398 )[5];
_imopVarPre396 = rhs[i_imopVar0][j_imopVar1][k_imopVar2];
_imopVarPre397 = lhs[i_imopVar0][j_imopVar1][k_imopVar2][2];
_imopVarPre398 = lhs[i_imopVar0][j_imopVar1][k_imopVar2][1];
binvcrhs(_imopVarPre398, _imopVarPre397, _imopVarPre396);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, binvrhs, matvec_sub, lhs.f, i_imopVar0, matmul_sub, binvcrhs])
#pragma omp barrier
}
#pragma omp for nowait
for (i_imopVar0 = 1; i_imopVar0 < grid_points[0] - 1; i_imopVar0++) {
for (k_imopVar2 = 1; k_imopVar2 < grid_points[2] - 1; k_imopVar2++) {
double ( *_imopVarPre402 );
double ( *_imopVarPre403 );
double ( *_imopVarPre404 )[5];
_imopVarPre402 = rhs[i_imopVar0][jsize][k_imopVar2];
_imopVarPre403 = rhs[i_imopVar0][jsize - 1][k_imopVar2];
_imopVarPre404 = lhs[i_imopVar0][jsize][k_imopVar2][0];
matvec_sub(_imopVarPre404, _imopVarPre403, _imopVarPre402);
double ( *_imopVarPre408 )[5];
double ( *_imopVarPre409 )[5];
double ( *_imopVarPre410 )[5];
_imopVarPre408 = lhs[i_imopVar0][jsize][k_imopVar2][1];
_imopVarPre409 = lhs[i_imopVar0][jsize - 1][k_imopVar2][2];
_imopVarPre410 = lhs[i_imopVar0][jsize][k_imopVar2][0];
matmul_sub(_imopVarPre410, _imopVarPre409, _imopVarPre408);
double ( *_imopVarPre413 );
double ( *_imopVarPre414 )[5];
_imopVarPre413 = rhs[i_imopVar0][jsize][k_imopVar2];
_imopVarPre414 = lhs[i_imopVar0][jsize][k_imopVar2][1];
binvrhs(_imopVarPre414, _imopVarPre413);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, i, grid_points.f, grid_points, lhs, lhs.f])
#pragma omp barrier
int i;
int j;
int k;
int m;
int n;
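// Back substitution along j, one barrier per j level for the same reason
// as in the i-direction sweep.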
for (j = grid_points[1] - 2; j >= 0; j--) {
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
for (n = 0; n < 5; n++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][2][m][n] * rhs[i][j + 1][k][n];
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, rhs, i, grid_points.f, grid_points, lhs, lhs.f])
#pragma omp barrier
}
}
// #pragma omp dummyFlush BARRIER_START written([]) read([u, fjac.f, u.f, tmp3, c4, fjac, c3, tmp1, grid_points.f, tmp2, grid_points, c3c4, njac, i, njac.f, con43, c2, c1345, c1])
#pragma omp barrier
{
int i;
int j;
int k;
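// z-direction implicit system: Jacobian evaluation once more, now with the
// w-momentum u[..][3] in the convective role.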
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 0; k < grid_points[2]; k++) {
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[i][j][k][0][0] = 0.0;
fjac[i][j][k][0][1] = 0.0;
fjac[i][j][k][0][2] = 0.0;
fjac[i][j][k][0][3] = 1.0;
fjac[i][j][k][0][4] = 0.0;
fjac[i][j][k][1][0] = -(u[i][j][k][1] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][1][2] = 0.0;
fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = -(u[i][j][k][2] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][2][1] = 0.0;
fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = -(u[i][j][k][3] * u[i][j][k][3] * tmp2) + 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);
fjac[i][j][k][3][1] = -c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][3][2] = -c2 * u[i][j][k][2] * tmp1;
fjac[i][j][k][3][3] = (2.0 - c2) * u[i][j][k][3] * tmp1;
fjac[i][j][k][3][4] = c2;
fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * (u[i][j][k][4] * tmp1)) * (u[i][j][k][3] * tmp1);
fjac[i][j][k][4][1] = -c2 * (u[i][j][k][1] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][4][2] = -c2 * (u[i][j][k][2] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][4][3] = c1 * (u[i][j][k][4] * tmp1) - 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + 3.0 * u[i][j][k][3] * u[i][j][k][3]) * tmp2);
fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = -c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = -c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = -con43 * c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1; // the parallel x/y blocks write this as con43 * c3c4 * tmp1; presumably c3c4 == c3 * c4
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = -(c3c4 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c3c4 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (con43 * c3c4 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = (c3c4 - c1345) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = (c3c4 - c1345) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = c1345 * tmp1;
}
}
}
// #pragma omp dummyFlush BARRIER_START written([fjac.f, tmp3, tmp1, tmp2, njac.f]) read([fjac.f, dt, fjac, tmp1, grid_points.f, tmp2, grid_points, njac, i, njac.f, tz2, lhs, tz1, dz3, lhs.f, dz2, dz5, dz4, dz1])
#pragma omp barrier
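// Assemble the block-tridiagonal coefficients in k from fjac and njac.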
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
tmp1 = dt * tz1;
tmp2 = dt * tz2;
lhs[i][j][k][0][0][0] = -tmp2 * fjac[i][j][k - 1][0][0] - tmp1 * njac[i][j][k - 1][0][0] - tmp1 * dz1;
lhs[i][j][k][0][0][1] = -tmp2 * fjac[i][j][k - 1][0][1] - tmp1 * njac[i][j][k - 1][0][1];
lhs[i][j][k][0][0][2] = -tmp2 * fjac[i][j][k - 1][0][2] - tmp1 * njac[i][j][k - 1][0][2];
lhs[i][j][k][0][0][3] = -tmp2 * fjac[i][j][k - 1][0][3] - tmp1 * njac[i][j][k - 1][0][3];
lhs[i][j][k][0][0][4] = -tmp2 * fjac[i][j][k - 1][0][4] - tmp1 * njac[i][j][k - 1][0][4];
lhs[i][j][k][0][1][0] = -tmp2 * fjac[i][j][k - 1][1][0] - tmp1 * njac[i][j][k - 1][1][0];
lhs[i][j][k][0][1][1] = -tmp2 * fjac[i][j][k - 1][1][1] - tmp1 * njac[i][j][k - 1][1][1] - tmp1 * dz2;
lhs[i][j][k][0][1][2] = -tmp2 * fjac[i][j][k - 1][1][2] - tmp1 * njac[i][j][k - 1][1][2];
lhs[i][j][k][0][1][3] = -tmp2 * fjac[i][j][k - 1][1][3] - tmp1 * njac[i][j][k - 1][1][3];
lhs[i][j][k][0][1][4] = -tmp2 * fjac[i][j][k - 1][1][4] - tmp1 * njac[i][j][k - 1][1][4];
lhs[i][j][k][0][2][0] = -tmp2 * fjac[i][j][k - 1][2][0] - tmp1 * njac[i][j][k - 1][2][0];
lhs[i][j][k][0][2][1] = -tmp2 * fjac[i][j][k - 1][2][1] - tmp1 * njac[i][j][k - 1][2][1];
lhs[i][j][k][0][2][2] = -tmp2 * fjac[i][j][k - 1][2][2] - tmp1 * njac[i][j][k - 1][2][2] - tmp1 * dz3;
lhs[i][j][k][0][2][3] = -tmp2 * fjac[i][j][k - 1][2][3] - tmp1 * njac[i][j][k - 1][2][3];
lhs[i][j][k][0][2][4] = -tmp2 * fjac[i][j][k - 1][2][4] - tmp1 * njac[i][j][k - 1][2][4];
lhs[i][j][k][0][3][0] = -tmp2 * fjac[i][j][k - 1][3][0] - tmp1 * njac[i][j][k - 1][3][0];
lhs[i][j][k][0][3][1] = -tmp2 * fjac[i][j][k - 1][3][1] - tmp1 * njac[i][j][k - 1][3][1];
lhs[i][j][k][0][3][2] = -tmp2 * fjac[i][j][k - 1][3][2] - tmp1 * njac[i][j][k - 1][3][2];
lhs[i][j][k][0][3][3] = -tmp2 * fjac[i][j][k - 1][3][3] - tmp1 * njac[i][j][k - 1][3][3] - tmp1 * dz4;
lhs[i][j][k][0][3][4] = -tmp2 * fjac[i][j][k - 1][3][4] - tmp1 * njac[i][j][k - 1][3][4];
lhs[i][j][k][0][4][0] = -tmp2 * fjac[i][j][k - 1][4][0] - tmp1 * njac[i][j][k - 1][4][0];
lhs[i][j][k][0][4][1] = -tmp2 * fjac[i][j][k - 1][4][1] - tmp1 * njac[i][j][k - 1][4][1];
lhs[i][j][k][0][4][2] = -tmp2 * fjac[i][j][k - 1][4][2] - tmp1 * njac[i][j][k - 1][4][2];
lhs[i][j][k][0][4][3] = -tmp2 * fjac[i][j][k - 1][4][3] - tmp1 * njac[i][j][k - 1][4][3];
lhs[i][j][k][0][4][4] = -tmp2 * fjac[i][j][k - 1][4][4] - tmp1 * njac[i][j][k - 1][4][4] - tmp1 * dz5;
lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dz1;
lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dz2;
lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dz3;
lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dz4;
lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dz5;
lhs[i][j][k][2][0][0] = tmp2 * fjac[i][j][k + 1][0][0] - tmp1 * njac[i][j][k + 1][0][0] - tmp1 * dz1;
lhs[i][j][k][2][0][1] = tmp2 * fjac[i][j][k + 1][0][1] - tmp1 * njac[i][j][k + 1][0][1];
lhs[i][j][k][2][0][2] = tmp2 * fjac[i][j][k + 1][0][2] - tmp1 * njac[i][j][k + 1][0][2];
lhs[i][j][k][2][0][3] = tmp2 * fjac[i][j][k + 1][0][3] - tmp1 * njac[i][j][k + 1][0][3];
lhs[i][j][k][2][0][4] = tmp2 * fjac[i][j][k + 1][0][4] - tmp1 * njac[i][j][k + 1][0][4];
lhs[i][j][k][2][1][0] = tmp2 * fjac[i][j][k + 1][1][0] - tmp1 * njac[i][j][k + 1][1][0];
lhs[i][j][k][2][1][1] = tmp2 * fjac[i][j][k + 1][1][1] - tmp1 * njac[i][j][k + 1][1][1] - tmp1 * dz2;
lhs[i][j][k][2][1][2] = tmp2 * fjac[i][j][k + 1][1][2] - tmp1 * njac[i][j][k + 1][1][2];
lhs[i][j][k][2][1][3] = tmp2 * fjac[i][j][k + 1][1][3] - tmp1 * njac[i][j][k + 1][1][3];
lhs[i][j][k][2][1][4] = tmp2 * fjac[i][j][k + 1][1][4] - tmp1 * njac[i][j][k + 1][1][4];
lhs[i][j][k][2][2][0] = tmp2 * fjac[i][j][k + 1][2][0] - tmp1 * njac[i][j][k + 1][2][0];
lhs[i][j][k][2][2][1] = tmp2 * fjac[i][j][k + 1][2][1] - tmp1 * njac[i][j][k + 1][2][1];
lhs[i][j][k][2][2][2] = tmp2 * fjac[i][j][k + 1][2][2] - tmp1 * njac[i][j][k + 1][2][2] - tmp1 * dz3;
lhs[i][j][k][2][2][3] = tmp2 * fjac[i][j][k + 1][2][3] - tmp1 * njac[i][j][k + 1][2][3];
lhs[i][j][k][2][2][4] = tmp2 * fjac[i][j][k + 1][2][4] - tmp1 * njac[i][j][k + 1][2][4];
lhs[i][j][k][2][3][0] = tmp2 * fjac[i][j][k + 1][3][0] - tmp1 * njac[i][j][k + 1][3][0];
lhs[i][j][k][2][3][1] = tmp2 * fjac[i][j][k + 1][3][1] - tmp1 * njac[i][j][k + 1][3][1];
lhs[i][j][k][2][3][2] = tmp2 * fjac[i][j][k + 1][3][2] - tmp1 * njac[i][j][k + 1][3][2];
lhs[i][j][k][2][3][3] = tmp2 * fjac[i][j][k + 1][3][3] - tmp1 * njac[i][j][k + 1][3][3] - tmp1 * dz4;
lhs[i][j][k][2][3][4] = tmp2 * fjac[i][j][k + 1][3][4] - tmp1 * njac[i][j][k + 1][3][4];
lhs[i][j][k][2][4][0] = tmp2 * fjac[i][j][k + 1][4][0] - tmp1 * njac[i][j][k + 1][4][0];
lhs[i][j][k][2][4][1] = tmp2 * fjac[i][j][k + 1][4][1] - tmp1 * njac[i][j][k + 1][4][1];
lhs[i][j][k][2][4][2] = tmp2 * fjac[i][j][k + 1][4][2] - tmp1 * njac[i][j][k + 1][4][2];
lhs[i][j][k][2][4][3] = tmp2 * fjac[i][j][k + 1][4][3] - tmp1 * njac[i][j][k + 1][4][3];
lhs[i][j][k][2][4][4] = tmp2 * fjac[i][j][k + 1][4][4] - tmp1 * njac[i][j][k + 1][4][4] - tmp1 * dz5;
}
}
}
// #pragma omp dummyFlush BARRIER_START written([tmp1, tmp2, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, lhs.f, i_imopVar15, binvcrhs])
#pragma omp barrier
int i_imopVar15;
int j_imopVar16;
int k_imopVar17;
int ksize;
ksize = grid_points[2] - 1;
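// Block-Thomas solve along k: scale the k = 0 block row, eliminate forward
// in k, then back-substitute.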
#pragma omp for nowait
for (i_imopVar15 = 1; i_imopVar15 < grid_points[0] - 1; i_imopVar15++) {
for (j_imopVar16 = 1; j_imopVar16 < grid_points[1] - 1; j_imopVar16++) {
double ( *_imopVarPre418 );
double ( *_imopVarPre419 )[5];
double ( *_imopVarPre420 )[5];
_imopVarPre418 = rhs[i_imopVar15][j_imopVar16][0];
_imopVarPre419 = lhs[i_imopVar15][j_imopVar16][0][2];
_imopVarPre420 = lhs[i_imopVar15][j_imopVar16][0][1];
binvcrhs(_imopVarPre420, _imopVarPre419, _imopVarPre418);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, binvrhs, matvec_sub, lhs.f, i_imopVar15, matmul_sub, binvcrhs])
#pragma omp barrier
for (k_imopVar17 = 1; k_imopVar17 < ksize; k_imopVar17++) {
#pragma omp for nowait
for (i_imopVar15 = 1; i_imopVar15 < grid_points[0] - 1; i_imopVar15++) {
for (j_imopVar16 = 1; j_imopVar16 < grid_points[1] - 1; j_imopVar16++) {
double ( *_imopVarPre424 );
double ( *_imopVarPre425 );
double ( *_imopVarPre426 )[5];
_imopVarPre424 = rhs[i_imopVar15][j_imopVar16][k_imopVar17];
_imopVarPre425 = rhs[i_imopVar15][j_imopVar16][k_imopVar17 - 1];
_imopVarPre426 = lhs[i_imopVar15][j_imopVar16][k_imopVar17][0];
matvec_sub(_imopVarPre426, _imopVarPre425, _imopVarPre424);
double ( *_imopVarPre430 )[5];
double ( *_imopVarPre431 )[5];
double ( *_imopVarPre432 )[5];
_imopVarPre430 = lhs[i_imopVar15][j_imopVar16][k_imopVar17][1];
_imopVarPre431 = lhs[i_imopVar15][j_imopVar16][k_imopVar17 - 1][2];
_imopVarPre432 = lhs[i_imopVar15][j_imopVar16][k_imopVar17][0];
matmul_sub(_imopVarPre432, _imopVarPre431, _imopVarPre430);
double ( *_imopVarPre436 );
double ( *_imopVarPre437 )[5];
double ( *_imopVarPre438 )[5];
_imopVarPre436 = rhs[i_imopVar15][j_imopVar16][k_imopVar17];
_imopVarPre437 = lhs[i_imopVar15][j_imopVar16][k_imopVar17][2];
_imopVarPre438 = lhs[i_imopVar15][j_imopVar16][k_imopVar17][1];
binvcrhs(_imopVarPre438, _imopVarPre437, _imopVarPre436);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, binvrhs, matvec_sub, lhs.f, i_imopVar15, matmul_sub, binvcrhs])
#pragma omp barrier
}
#pragma omp for nowait
for (i_imopVar15 = 1; i_imopVar15 < grid_points[0] - 1; i_imopVar15++) {
for (j_imopVar16 = 1; j_imopVar16 < grid_points[1] - 1; j_imopVar16++) {
double ( *_imopVarPre442 );
double ( *_imopVarPre443 );
double ( *_imopVarPre444 )[5];
_imopVarPre442 = rhs[i_imopVar15][j_imopVar16][ksize];
_imopVarPre443 = rhs[i_imopVar15][j_imopVar16][ksize - 1];
_imopVarPre444 = lhs[i_imopVar15][j_imopVar16][ksize][0];
matvec_sub(_imopVarPre444, _imopVarPre443, _imopVarPre442);
double ( *_imopVarPre448 )[5];
double ( *_imopVarPre449 )[5];
double ( *_imopVarPre450 )[5];
_imopVarPre448 = lhs[i_imopVar15][j_imopVar16][ksize][1];
_imopVarPre449 = lhs[i_imopVar15][j_imopVar16][ksize - 1][2];
_imopVarPre450 = lhs[i_imopVar15][j_imopVar16][ksize][0];
matmul_sub(_imopVarPre450, _imopVarPre449, _imopVarPre448);
double ( *_imopVarPre453 );
double ( *_imopVarPre454 )[5];
_imopVarPre453 = rhs[i_imopVar15][j_imopVar16][ksize];
_imopVarPre454 = lhs[i_imopVar15][j_imopVar16][ksize][1];
binvrhs(_imopVarPre454, _imopVarPre453);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, i_imopVar12, grid_points, lhs, lhs.f])
#pragma omp barrier
int i_imopVar12;
int j_imopVar13;
int k_imopVar14;
int m;
int n;
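// Back substitution along k. The whole k recurrence stays inside a single
// worksharing loop because each (i,j) column is independent, so no
// per-level barrier is needed here.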
#pragma omp for nowait
for (i_imopVar12 = 1; i_imopVar12 < grid_points[0] - 1; i_imopVar12++) {
for (j_imopVar13 = 1; j_imopVar13 < grid_points[1] - 1; j_imopVar13++) {
for (k_imopVar14 = grid_points[2] - 2; k_imopVar14 >= 0; k_imopVar14--) {
for (m = 0; m < 5; m++) {
for (n = 0; n < 5; n++) {
rhs[i_imopVar12][j_imopVar13][k_imopVar14][m] = rhs[i_imopVar12][j_imopVar13][k_imopVar14][m] - lhs[i_imopVar12][j_imopVar13][k_imopVar14][2][m][n] * rhs[i_imopVar12][j_imopVar13][k_imopVar14 + 1][n];
}
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([])
#pragma omp barrier
}
// #pragma omp dummyFlush BARRIER_START written([]) read([u, rhs.f, u.f, rhs, i, grid_points.f, grid_points, add])
#pragma omp barrier
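// add(): update the solution with the computed increment, i.e. u += rhs
// (consistent with the written([u.f]) set recorded at the barrier below).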
add();
// #pragma omp dummyFlush BARRIER_START written([u.f]) read([i, u, u.f])
#pragma omp barrier
{
int i;
int j;
int k;
int m;
int ix;
int iy;
int iz;
double xi;
double eta;
double zeta;
double Pface[2][3][5];
double Pxi;
double Peta;
double Pzeta;
double temp[5];
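// (Re)initialization of the field. The hard-coded bound of 12 below is
// evidently the compile-time array extent for this problem size; every
// cell is first set to 1.0 so that no element is left undefined.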
#pragma omp for nowait
for (i = 0; i < 12; i++) {
for (j = 0; j < 12; j++) {
for (k = 0; k < 12; k++) {
for (m = 0; m < 5; m++) {
u[i][j][k][m] = 1.0;
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written([u.f]) read([ce, u, ce.f, i, u.f, grid_points.f, grid_points, dnym1, dnxm1, dnzm1, exact_solution])
#pragma omp barrier
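// Tri-linear transfinite interpolation of the exact solution: blend the
// face values Pxi, Peta, Pzeta so that the interpolant matches
// exact_solution on all six faces of the unit cube.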
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
for (ix = 0; ix < 2; ix++) {
double *_imopVarPre191;
double _imopVarPre192;
_imopVarPre191 = &(Pface[ix][0][0]);
_imopVarPre192 = (double) ix;
exact_solution(_imopVarPre192, eta, zeta, _imopVarPre191);
}
for (iy = 0; iy < 2; iy++) {
double *_imopVarPre195;
double _imopVarPre196;
_imopVarPre195 = &Pface[iy][1][0];
_imopVarPre196 = (double) iy;
exact_solution(xi, _imopVarPre196, zeta, _imopVarPre195);
}
for (iz = 0; iz < 2; iz++) {
double *_imopVarPre199;
double _imopVarPre200;
_imopVarPre199 = &Pface[iz][2][0];
_imopVarPre200 = (double) iz;
exact_solution(xi, eta, _imopVarPre200, _imopVarPre199);
}
for (m = 0; m < 5; m++) {
Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m];
u[i][j][k][m] = Pxi + Peta + Pzeta - Pxi * Peta - Pxi * Pzeta - Peta * Pzeta + Pxi * Peta * Pzeta;
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written([u.f, Pface.f, temp.f, temp.f, temp.f, Pface.f, u_exact.f, Pface.f]) read([j, ce, u, ce.f, u.f, grid_points.f, grid_points, dnym1, dnzm1, exact_solution])
#pragma omp barrier
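// Impose the exact solution on the six boundary faces, one pair of faces
// (xi = 0/1, eta = 0/1, zeta = 0/1) per pair of loops below.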
i = 0;
xi = 0.0;
#pragma omp for nowait
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
i = grid_points[0] - 1;
xi = 1.0;
#pragma omp for nowait
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
// #pragma omp dummyFlush BARRIER_START written([u.f, Pface.f, temp.f, temp.f, temp.f, Pface.f, u_exact.f, Pface.f]) read([ce, i, ce.f, u, u.f, grid_points.f, grid_points, dnxm1, dnzm1, exact_solution])
#pragma omp barrier
j = 0;
eta = 0.0;
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
j = grid_points[1] - 1;
eta = 1.0;
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
// #pragma omp dummyFlush BARRIER_START written([Pface.f, u.f, temp.f, temp.f, temp.f, Pface.f, u_exact.f, Pface.f]) read([ce, i, u, ce.f, u.f, grid_points.f, grid_points, dnym1, dnxm1, exact_solution])
#pragma omp barrier
k = 0;
zeta = 0.0;
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
k = grid_points[2] - 1;
zeta = 1.0;
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written([u.f, Pface.f, temp.f, temp.f, temp.f, Pface.f, u_exact.f, Pface.f]) read([xcr.f, xxcon5, dy3ty1, vs.f, _imopVarPre270, yycon3, ce.f, dy1ty1, timer_start, _imopVarPre276, nullCell, u, dy5ty1, xxcon4, us, dz4tz1, qs, ty2, yycon4, niter, dnxm1, dx4tx1, u.f, xce.f, grid_points.f, j, sqrt, xxcon3, square.f, us.f, _imopVarPre292, dz2tz1, qs.f, timer_read, _imopVarPre282, yycon5, zzcon3, i, rho_i, xce, dt, grid_points, dx2tx1, square, i, xxcon2, &verified, _imopVarPre293, tz2, zzcon2, rho_i.f, dy2ty1, rhs.f, &class, printf, _imopVarPre294, forcing.f, _imopVarPre288, zzcon5, _imopVarPre280, dy4ty1, dnym1, c_print_results, _imopVarPre176, fabs, dx1tx1, ws, _imopVarPre281, dssp, xce.f, zzcon4, dz5tz1, _imopVarPre269, c1, _imopVarPre175, _imopVarPre268, rhs_norm, ws.f, timer_stop, _imopVarPre286, dx5tx1, xcr.f, error_norm, _imopVarPre274, c2, dz3tz1, rhs, step, exact_solution, vs, timer_clear, yycon2, ce, _imopVarPre172, forcing, tx2, _imopVarPre287, con43, dx3tx1, _imopVarPre275, dnzm1, dz1tz1])
#pragma omp barrier
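// Reset and start the benchmark timer on the master thread only.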
#pragma omp master
{
timer_clear(1);
timer_start(1);
}
}
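// Main time-stepping loop: progress is reported every 20 steps (and at
// step 1), then the right-hand side is rebuilt from the forcing terms and
// the flux and dissipation differences before the directional solves.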
for (step = 1; step <= niter; step++) {
#pragma omp parallel
{
int _imopVarPre172;
#pragma omp master
{
_imopVarPre172 = step % 20 == 0;
if (!_imopVarPre172) {
_imopVarPre172 = step == 1;
}
if (_imopVarPre172) {
printf(" Time step %4d\n", step);
}
}
int i;
int j;
int k;
int m;
double rho_inv;
double uijk;
double up1;
double um1;
double vijk;
double vp1;
double vm1;
double wijk;
double wp1;
double wm1;
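// Precompute per-cell auxiliaries from the conserved state u: reciprocal
// density rho_i, velocities us/vs/ws, the kinetic term
// square = 0.5*(u1^2 + u2^2 + u3^2)/rho, and qs = square * rho_i.
// The following loop then seeds rhs with the forcing terms.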
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
rho_inv = 1.0 / u[i][j][k][0];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[i][j][k][1] * rho_inv;
vs[i][j][k] = u[i][j][k][2] * rho_inv;
ws[i][j][k] = u[i][j][k][3] * rho_inv;
square[i][j][k] = 0.5 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
}
}
}
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = forcing[i][j][k][m];
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
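// x-direction fluxes: central second differences plus the tx2 convective
// differences, mirroring the z-direction block earlier.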
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
uijk = us[i][j][k];
up1 = us[i + 1][j][k];
um1 = us[i - 1][j][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * (u[i + 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i - 1][j][k][0]) - tx2 * (u[i + 1][j][k][1] - u[i - 1][j][k][1]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * (u[i + 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i - 1][j][k][1]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[i + 1][j][k][1] * up1 - u[i - 1][j][k][1] * um1 + (u[i + 1][j][k][4] - square[i + 1][j][k] - u[i - 1][j][k][4] + square[i - 1][j][k]) * c2);
rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * (u[i + 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i - 1][j][k][2]) + xxcon2 * (vs[i + 1][j][k] - 2.0 * vs[i][j][k] + vs[i - 1][j][k]) - tx2 * (u[i + 1][j][k][2] * up1 - u[i - 1][j][k][2] * um1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * (u[i + 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i - 1][j][k][3]) + xxcon2 * (ws[i + 1][j][k] - 2.0 * ws[i][j][k] + ws[i - 1][j][k]) - tx2 * (u[i + 1][j][k][3] * up1 - u[i - 1][j][k][3] * um1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * (u[i + 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i - 1][j][k][4]) + xxcon3 * (qs[i + 1][j][k] - 2.0 * qs[i][j][k] + qs[i - 1][j][k]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[i + 1][j][k][4] * rho_i[i + 1][j][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i - 1][j][k][4] * rho_i[i - 1][j][k]) - tx2 * ((c1 * u[i + 1][j][k][4] - c2 * square[i + 1][j][k]) * up1 - (c1 * u[i - 1][j][k][4] - c2 * square[i - 1][j][k]) * um1);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
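// Fourth-order artificial dissipation in the xi direction; the stencils at
// i = 1, 2 and near i = grid_points[0] - 2 are truncated at the boundary.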
i = 1;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);
}
}
}
i = 2;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);
}
}
}
#pragma omp for nowait
for (i = 3; i < grid_points[0] - 3; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);
}
}
}
}
i = grid_points[0] - 3;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m]);
}
}
}
i = grid_points[0] - 2;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 5.0 * u[i][j][k][m]);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
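// Add the eta-direction (j) flux differences to rhs.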
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
vijk = vs[i][j][k];
vp1 = vs[i][j + 1][k];
vm1 = vs[i][j - 1][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * (u[i][j + 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j - 1][k][0]) - ty2 * (u[i][j + 1][k][2] - u[i][j - 1][k][2]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * (u[i][j + 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j - 1][k][1]) + yycon2 * (us[i][j + 1][k] - 2.0 * us[i][j][k] + us[i][j - 1][k]) - ty2 * (u[i][j + 1][k][1] * vp1 - u[i][j - 1][k][1] * vm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * (u[i][j + 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j - 1][k][2]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[i][j + 1][k][2] * vp1 - u[i][j - 1][k][2] * vm1 + (u[i][j + 1][k][4] - square[i][j + 1][k] - u[i][j - 1][k][4] + square[i][j - 1][k]) * c2);
rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * (u[i][j + 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j - 1][k][3]) + yycon2 * (ws[i][j + 1][k] - 2.0 * ws[i][j][k] + ws[i][j - 1][k]) - ty2 * (u[i][j + 1][k][3] * vp1 - u[i][j - 1][k][3] * vm1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * (u[i][j + 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j - 1][k][4]) + yycon3 * (qs[i][j + 1][k] - 2.0 * qs[i][j][k] + qs[i][j - 1][k]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[i][j + 1][k][4] * rho_i[i][j + 1][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j - 1][k][4] * rho_i[i][j - 1][k]) - ty2 * ((c1 * u[i][j + 1][k][4] - c2 * square[i][j + 1][k]) * vp1 - (c1 * u[i][j - 1][k][4] - c2 * square[i][j - 1][k]) * vm1);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
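// Fourth-order dissipation in the eta direction with boundary-adjusted stencils.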
j = 1;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);
}
}
}
j = 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 3; j < grid_points[1] - 3; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);
}
}
}
}
j = grid_points[1] - 3;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m]);
}
}
}
j = grid_points[1] - 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 5.0 * u[i][j][k][m]);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
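// Add the zeta-direction (k) flux differences to rhs.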
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
wijk = ws[i][j][k];
wp1 = ws[i][j][k + 1];
wm1 = ws[i][j][k - 1];
rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * (u[i][j][k + 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k - 1][0]) - tz2 * (u[i][j][k + 1][3] - u[i][j][k - 1][3]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * (u[i][j][k + 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k - 1][1]) + zzcon2 * (us[i][j][k + 1] - 2.0 * us[i][j][k] + us[i][j][k - 1]) - tz2 * (u[i][j][k + 1][1] * wp1 - u[i][j][k - 1][1] * wm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * (u[i][j][k + 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k - 1][2]) + zzcon2 * (vs[i][j][k + 1] - 2.0 * vs[i][j][k] + vs[i][j][k - 1]) - tz2 * (u[i][j][k + 1][2] * wp1 - u[i][j][k - 1][2] * wm1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * (u[i][j][k + 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k - 1][3]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[i][j][k + 1][3] * wp1 - u[i][j][k - 1][3] * wm1 + (u[i][j][k + 1][4] - square[i][j][k + 1] - u[i][j][k - 1][4] + square[i][j][k - 1]) * c2);
rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * (u[i][j][k + 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k - 1][4]) + zzcon3 * (qs[i][j][k + 1] - 2.0 * qs[i][j][k] + qs[i][j][k - 1]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[i][j][k + 1][4] * rho_i[i][j][k + 1] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j][k - 1][4] * rho_i[i][j][k - 1]) - tz2 * ((c1 * u[i][j][k + 1][4] - c2 * square[i][j][k + 1]) * wp1 - (c1 * u[i][j][k - 1][4] - c2 * square[i][j][k - 1]) * wm1);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
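// Fourth-order dissipation in the zeta direction with boundary-adjusted stencils.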
k = 1;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (5.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);
}
}
}
k = 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 3; k < grid_points[2] - 3; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);
}
}
}
}
k = grid_points[2] - 3;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m]);
}
}
}
k = grid_points[2] - 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 5.0 * u[i][j][k][m]);
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
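// Scale the assembled right-hand side by the time step dt.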
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
for (i = 1; i < grid_points[0] - 1; i++) {
rhs[i][j][k][m] = rhs[i][j][k][m] * dt;
}
}
}
}
{
int i;
int j;
int k;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (i = 0; i < grid_points[0]; i++) {
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[i][j][k][0][0] = 0.0;
fjac[i][j][k][0][1] = 1.0;
fjac[i][j][k][0][2] = 0.0;
fjac[i][j][k][0][3] = 0.0;
fjac[i][j][k][0][4] = 0.0;
fjac[i][j][k][1][0] = -(u[i][j][k][1] * tmp2 * u[i][j][k][1]) + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][1][1] = (2.0 - c2) * (u[i][j][k][1] / u[i][j][k][0]);
fjac[i][j][k][1][2] = -c2 * (u[i][j][k][2] * tmp1);
fjac[i][j][k][1][3] = -c2 * (u[i][j][k][3] * tmp1);
fjac[i][j][k][1][4] = c2;
fjac[i][j][k][2][0] = -(u[i][j][k][1] * u[i][j][k][2]) * tmp2;
fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
fjac[i][j][k][2][3] = 0.0;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = -(u[i][j][k][1] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][3][2] = 0.0;
fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][3][4] = 0.0;
fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * (u[i][j][k][4] * tmp1)) * (u[i][j][k][1] * tmp1);
fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1 - 0.50 * c2 * (3.0 * u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][4][2] = -c2 * (u[i][j][k][2] * u[i][j][k][1]) * tmp2;
fjac[i][j][k][4][3] = -c2 * (u[i][j][k][3] * u[i][j][k][1]) * tmp2;
fjac[i][j][k][4][4] = c1 * (u[i][j][k][1] * tmp1);
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = -con43 * c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = con43 * c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = -c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = -c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = -(con43 * c3c4 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c3c4 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c3c4 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = (c3c4 - c1345) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = (c3c4 - c1345) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = c1345 * tmp1;
}
for (i = 1; i < grid_points[0] - 1; i++) {
tmp1 = dt * tx1;
tmp2 = dt * tx2;
lhs[i][j][k][0][0][0] = -tmp2 * fjac[i - 1][j][k][0][0] - tmp1 * njac[i - 1][j][k][0][0] - tmp1 * dx1;
lhs[i][j][k][0][0][1] = -tmp2 * fjac[i - 1][j][k][0][1] - tmp1 * njac[i - 1][j][k][0][1];
lhs[i][j][k][0][0][2] = -tmp2 * fjac[i - 1][j][k][0][2] - tmp1 * njac[i - 1][j][k][0][2];
lhs[i][j][k][0][0][3] = -tmp2 * fjac[i - 1][j][k][0][3] - tmp1 * njac[i - 1][j][k][0][3];
lhs[i][j][k][0][0][4] = -tmp2 * fjac[i - 1][j][k][0][4] - tmp1 * njac[i - 1][j][k][0][4];
lhs[i][j][k][0][1][0] = -tmp2 * fjac[i - 1][j][k][1][0] - tmp1 * njac[i - 1][j][k][1][0];
lhs[i][j][k][0][1][1] = -tmp2 * fjac[i - 1][j][k][1][1] - tmp1 * njac[i - 1][j][k][1][1] - tmp1 * dx2;
lhs[i][j][k][0][1][2] = -tmp2 * fjac[i - 1][j][k][1][2] - tmp1 * njac[i - 1][j][k][1][2];
lhs[i][j][k][0][1][3] = -tmp2 * fjac[i - 1][j][k][1][3] - tmp1 * njac[i - 1][j][k][1][3];
lhs[i][j][k][0][1][4] = -tmp2 * fjac[i - 1][j][k][1][4] - tmp1 * njac[i - 1][j][k][1][4];
lhs[i][j][k][0][2][0] = -tmp2 * fjac[i - 1][j][k][2][0] - tmp1 * njac[i - 1][j][k][2][0];
lhs[i][j][k][0][2][1] = -tmp2 * fjac[i - 1][j][k][2][1] - tmp1 * njac[i - 1][j][k][2][1];
lhs[i][j][k][0][2][2] = -tmp2 * fjac[i - 1][j][k][2][2] - tmp1 * njac[i - 1][j][k][2][2] - tmp1 * dx3;
lhs[i][j][k][0][2][3] = -tmp2 * fjac[i - 1][j][k][2][3] - tmp1 * njac[i - 1][j][k][2][3];
lhs[i][j][k][0][2][4] = -tmp2 * fjac[i - 1][j][k][2][4] - tmp1 * njac[i - 1][j][k][2][4];
lhs[i][j][k][0][3][0] = -tmp2 * fjac[i - 1][j][k][3][0] - tmp1 * njac[i - 1][j][k][3][0];
lhs[i][j][k][0][3][1] = -tmp2 * fjac[i - 1][j][k][3][1] - tmp1 * njac[i - 1][j][k][3][1];
lhs[i][j][k][0][3][2] = -tmp2 * fjac[i - 1][j][k][3][2] - tmp1 * njac[i - 1][j][k][3][2];
lhs[i][j][k][0][3][3] = -tmp2 * fjac[i - 1][j][k][3][3] - tmp1 * njac[i - 1][j][k][3][3] - tmp1 * dx4;
lhs[i][j][k][0][3][4] = -tmp2 * fjac[i - 1][j][k][3][4] - tmp1 * njac[i - 1][j][k][3][4];
lhs[i][j][k][0][4][0] = -tmp2 * fjac[i - 1][j][k][4][0] - tmp1 * njac[i - 1][j][k][4][0];
lhs[i][j][k][0][4][1] = -tmp2 * fjac[i - 1][j][k][4][1] - tmp1 * njac[i - 1][j][k][4][1];
lhs[i][j][k][0][4][2] = -tmp2 * fjac[i - 1][j][k][4][2] - tmp1 * njac[i - 1][j][k][4][2];
lhs[i][j][k][0][4][3] = -tmp2 * fjac[i - 1][j][k][4][3] - tmp1 * njac[i - 1][j][k][4][3];
lhs[i][j][k][0][4][4] = -tmp2 * fjac[i - 1][j][k][4][4] - tmp1 * njac[i - 1][j][k][4][4] - tmp1 * dx5;
lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dx1;
lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dx2;
lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dx3;
lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dx4;
lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dx5;
lhs[i][j][k][2][0][0] = tmp2 * fjac[i + 1][j][k][0][0] - tmp1 * njac[i + 1][j][k][0][0] - tmp1 * dx1;
lhs[i][j][k][2][0][1] = tmp2 * fjac[i + 1][j][k][0][1] - tmp1 * njac[i + 1][j][k][0][1];
lhs[i][j][k][2][0][2] = tmp2 * fjac[i + 1][j][k][0][2] - tmp1 * njac[i + 1][j][k][0][2];
lhs[i][j][k][2][0][3] = tmp2 * fjac[i + 1][j][k][0][3] - tmp1 * njac[i + 1][j][k][0][3];
lhs[i][j][k][2][0][4] = tmp2 * fjac[i + 1][j][k][0][4] - tmp1 * njac[i + 1][j][k][0][4];
lhs[i][j][k][2][1][0] = tmp2 * fjac[i + 1][j][k][1][0] - tmp1 * njac[i + 1][j][k][1][0];
lhs[i][j][k][2][1][1] = tmp2 * fjac[i + 1][j][k][1][1] - tmp1 * njac[i + 1][j][k][1][1] - tmp1 * dx2;
lhs[i][j][k][2][1][2] = tmp2 * fjac[i + 1][j][k][1][2] - tmp1 * njac[i + 1][j][k][1][2];
lhs[i][j][k][2][1][3] = tmp2 * fjac[i + 1][j][k][1][3] - tmp1 * njac[i + 1][j][k][1][3];
lhs[i][j][k][2][1][4] = tmp2 * fjac[i + 1][j][k][1][4] - tmp1 * njac[i + 1][j][k][1][4];
lhs[i][j][k][2][2][0] = tmp2 * fjac[i + 1][j][k][2][0] - tmp1 * njac[i + 1][j][k][2][0];
lhs[i][j][k][2][2][1] = tmp2 * fjac[i + 1][j][k][2][1] - tmp1 * njac[i + 1][j][k][2][1];
lhs[i][j][k][2][2][2] = tmp2 * fjac[i + 1][j][k][2][2] - tmp1 * njac[i + 1][j][k][2][2] - tmp1 * dx3;
lhs[i][j][k][2][2][3] = tmp2 * fjac[i + 1][j][k][2][3] - tmp1 * njac[i + 1][j][k][2][3];
lhs[i][j][k][2][2][4] = tmp2 * fjac[i + 1][j][k][2][4] - tmp1 * njac[i + 1][j][k][2][4];
lhs[i][j][k][2][3][0] = tmp2 * fjac[i + 1][j][k][3][0] - tmp1 * njac[i + 1][j][k][3][0];
lhs[i][j][k][2][3][1] = tmp2 * fjac[i + 1][j][k][3][1] - tmp1 * njac[i + 1][j][k][3][1];
lhs[i][j][k][2][3][2] = tmp2 * fjac[i + 1][j][k][3][2] - tmp1 * njac[i + 1][j][k][3][2];
lhs[i][j][k][2][3][3] = tmp2 * fjac[i + 1][j][k][3][3] - tmp1 * njac[i + 1][j][k][3][3] - tmp1 * dx4;
lhs[i][j][k][2][3][4] = tmp2 * fjac[i + 1][j][k][3][4] - tmp1 * njac[i + 1][j][k][3][4];
lhs[i][j][k][2][4][0] = tmp2 * fjac[i + 1][j][k][4][0] - tmp1 * njac[i + 1][j][k][4][0];
lhs[i][j][k][2][4][1] = tmp2 * fjac[i + 1][j][k][4][1] - tmp1 * njac[i + 1][j][k][4][1];
lhs[i][j][k][2][4][2] = tmp2 * fjac[i + 1][j][k][4][2] - tmp1 * njac[i + 1][j][k][4][2];
lhs[i][j][k][2][4][3] = tmp2 * fjac[i + 1][j][k][4][3] - tmp1 * njac[i + 1][j][k][4][3];
lhs[i][j][k][2][4][4] = tmp2 * fjac[i + 1][j][k][4][4] - tmp1 * njac[i + 1][j][k][4][4] - tmp1 * dx5;
}
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
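// Forward elimination of the block-tridiagonal system along i: binvcrhs
// applies the inverse diagonal block; matvec_sub/matmul_sub eliminate the
// sub-diagonal block's contribution to rhs and lhs.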
int i_imopVar9;
int j_imopVar10;
int k_imopVar11;
int isize;
isize = grid_points[0] - 1;
#pragma omp for nowait
for (j_imopVar10 = 1; j_imopVar10 < grid_points[1] - 1; j_imopVar10++) {
for (k_imopVar11 = 1; k_imopVar11 < grid_points[2] - 1; k_imopVar11++) {
double ( *_imopVarPre338 );
double ( *_imopVarPre339 )[5];
double ( *_imopVarPre340 )[5];
_imopVarPre338 = rhs[0][j_imopVar10][k_imopVar11];
_imopVarPre339 = lhs[0][j_imopVar10][k_imopVar11][2];
_imopVarPre340 = lhs[0][j_imopVar10][k_imopVar11][1];
binvcrhs(_imopVarPre340, _imopVarPre339, _imopVarPre338);
}
}
// #pragma omp dummyFlush BARRIER_START written() read()
#pragma omp barrier
for (i_imopVar9 = 1; i_imopVar9 < isize; i_imopVar9++) {
#pragma omp for nowait
for (j_imopVar10 = 1; j_imopVar10 < grid_points[1] - 1; j_imopVar10++) {
for (k_imopVar11 = 1; k_imopVar11 < grid_points[2] - 1; k_imopVar11++) {
double ( *_imopVarPre344 );
double ( *_imopVarPre345 );
double ( *_imopVarPre346 )[5];
_imopVarPre344 = rhs[i_imopVar9][j_imopVar10][k_imopVar11];
_imopVarPre345 = rhs[i_imopVar9 - 1][j_imopVar10][k_imopVar11];
_imopVarPre346 = lhs[i_imopVar9][j_imopVar10][k_imopVar11][0];
matvec_sub(_imopVarPre346, _imopVarPre345, _imopVarPre344);
double ( *_imopVarPre350 )[5];
double ( *_imopVarPre351 )[5];
double ( *_imopVarPre352 )[5];
_imopVarPre350 = lhs[i_imopVar9][j_imopVar10][k_imopVar11][1];
_imopVarPre351 = lhs[i_imopVar9 - 1][j_imopVar10][k_imopVar11][2];
_imopVarPre352 = lhs[i_imopVar9][j_imopVar10][k_imopVar11][0];
matmul_sub(_imopVarPre352, _imopVarPre351, _imopVarPre350);
double ( *_imopVarPre356 );
double ( *_imopVarPre357 )[5];
double ( *_imopVarPre358 )[5];
_imopVarPre356 = rhs[i_imopVar9][j_imopVar10][k_imopVar11];
_imopVarPre357 = lhs[i_imopVar9][j_imopVar10][k_imopVar11][2];
_imopVarPre358 = lhs[i_imopVar9][j_imopVar10][k_imopVar11][1];
binvcrhs(_imopVarPre358, _imopVarPre357, _imopVarPre356);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, binvrhs, matvec_sub, lhs.f, matmul_sub, binvcrhs, j_imopVar10])
#pragma omp barrier
}
#pragma omp for nowait
for (j_imopVar10 = 1; j_imopVar10 < grid_points[1] - 1; j_imopVar10++) {
for (k_imopVar11 = 1; k_imopVar11 < grid_points[2] - 1; k_imopVar11++) {
double ( *_imopVarPre362 );
double ( *_imopVarPre363 );
double ( *_imopVarPre364 )[5];
_imopVarPre362 = rhs[isize][j_imopVar10][k_imopVar11];
_imopVarPre363 = rhs[isize - 1][j_imopVar10][k_imopVar11];
_imopVarPre364 = lhs[isize][j_imopVar10][k_imopVar11][0];
matvec_sub(_imopVarPre364, _imopVarPre363, _imopVarPre362);
double ( *_imopVarPre368 )[5];
double ( *_imopVarPre369 )[5];
double ( *_imopVarPre370 )[5];
_imopVarPre368 = lhs[isize][j_imopVar10][k_imopVar11][1];
_imopVarPre369 = lhs[isize - 1][j_imopVar10][k_imopVar11][2];
_imopVarPre370 = lhs[isize][j_imopVar10][k_imopVar11][0];
matmul_sub(_imopVarPre370, _imopVarPre369, _imopVarPre368);
double ( *_imopVarPre373 );
double ( *_imopVarPre374 )[5];
_imopVarPre373 = rhs[isize][j_imopVar10][k_imopVar11];
_imopVarPre374 = lhs[isize][j_imopVar10][k_imopVar11][1];
binvrhs(_imopVarPre374, _imopVarPre373);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, lhs.f, j_imopVar7])
#pragma omp barrier
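// Back substitution along i, sweeping from grid_points[0] - 2 down to 0.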
int i_imopVar6;
int j_imopVar7;
int k_imopVar8;
int m;
int n;
for (i_imopVar6 = grid_points[0] - 2; i_imopVar6 >= 0; i_imopVar6--) {
#pragma omp for nowait
for (j_imopVar7 = 1; j_imopVar7 < grid_points[1] - 1; j_imopVar7++) {
for (k_imopVar8 = 1; k_imopVar8 < grid_points[2] - 1; k_imopVar8++) {
for (m = 0; m < 5; m++) {
for (n = 0; n < 5; n++) {
rhs[i_imopVar6][j_imopVar7][k_imopVar8][m] = rhs[i_imopVar6][j_imopVar7][k_imopVar8][m] - lhs[i_imopVar6][j_imopVar7][k_imopVar8][2][m][n] * rhs[i_imopVar6 + 1][j_imopVar7][k_imopVar8][n];
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, lhs.f, j_imopVar7])
#pragma omp barrier
}
}
// #pragma omp dummyFlush BARRIER_START written([]) read([u, u.f, fjac.f, tmp3, fjac, grid_points.f, tmp1, tmp2, grid_points, i_imopVar3, c3c4, njac, njac.f, con43, c2, c1345, c1])
#pragma omp barrier
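// y sweep: same Jacobian construction as the x sweep, with the velocity
// components permuted for the eta direction.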
{
int i_imopVar3;
int j_imopVar4;
int k_imopVar5;
#pragma omp for nowait
for (i_imopVar3 = 1; i_imopVar3 < grid_points[0] - 1; i_imopVar3++) {
for (j_imopVar4 = 0; j_imopVar4 < grid_points[1]; j_imopVar4++) {
for (k_imopVar5 = 1; k_imopVar5 < grid_points[2] - 1; k_imopVar5++) {
tmp1 = 1.0 / u[i_imopVar3][j_imopVar4][k_imopVar5][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][0][0] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][0][1] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][0][2] = 1.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][0][3] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][0][4] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][1][0] = -(u[i_imopVar3][j_imopVar4][k_imopVar5][1] * u[i_imopVar3][j_imopVar4][k_imopVar5][2]) * tmp2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][1][1] = u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][1][2] = u[i_imopVar3][j_imopVar4][k_imopVar5][1] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][1][3] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][1][4] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][2][0] = -(u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp2) + 0.50 * c2 * ((u[i_imopVar3][j_imopVar4][k_imopVar5][1] * u[i_imopVar3][j_imopVar4][k_imopVar5][1] + u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][2] + u[i_imopVar3][j_imopVar4][k_imopVar5][3] * u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * tmp2);
fjac[i_imopVar3][j_imopVar4][k_imopVar5][2][1] = -c2 * u[i_imopVar3][j_imopVar4][k_imopVar5][1] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][2][2] = (2.0 - c2) * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][2][3] = -c2 * u[i_imopVar3][j_imopVar4][k_imopVar5][3] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][2][4] = c2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][3][0] = -(u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * tmp2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][3][1] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][3][2] = u[i_imopVar3][j_imopVar4][k_imopVar5][3] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][3][3] = u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][3][4] = 0.0;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][4][0] = (c2 * (u[i_imopVar3][j_imopVar4][k_imopVar5][1] * u[i_imopVar3][j_imopVar4][k_imopVar5][1] + u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][2] + u[i_imopVar3][j_imopVar4][k_imopVar5][3] * u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * tmp2 - c1 * u[i_imopVar3][j_imopVar4][k_imopVar5][4] * tmp1) * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp1;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][4][1] = -c2 * u[i_imopVar3][j_imopVar4][k_imopVar5][1] * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][4][2] = c1 * u[i_imopVar3][j_imopVar4][k_imopVar5][4] * tmp1 - 0.50 * c2 * ((u[i_imopVar3][j_imopVar4][k_imopVar5][1] * u[i_imopVar3][j_imopVar4][k_imopVar5][1] + 3.0 * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][2] + u[i_imopVar3][j_imopVar4][k_imopVar5][3] * u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * tmp2);
fjac[i_imopVar3][j_imopVar4][k_imopVar5][4][3] = -c2 * (u[i_imopVar3][j_imopVar4][k_imopVar5][2] * u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * tmp2;
fjac[i_imopVar3][j_imopVar4][k_imopVar5][4][4] = c1 * u[i_imopVar3][j_imopVar4][k_imopVar5][2] * tmp1;
njac[i_imopVar3][j_imopVar4][k_imopVar5][0][0] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][0][1] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][0][2] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][0][3] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][0][4] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][1][0] = -c3c4 * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][1];
njac[i_imopVar3][j_imopVar4][k_imopVar5][1][1] = c3c4 * tmp1;
njac[i_imopVar3][j_imopVar4][k_imopVar5][1][2] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][1][3] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][1][4] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][2][0] = -con43 * c3c4 * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][2];
njac[i_imopVar3][j_imopVar4][k_imopVar5][2][1] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][2][2] = con43 * c3c4 * tmp1;
njac[i_imopVar3][j_imopVar4][k_imopVar5][2][3] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][2][4] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][3][0] = -c3c4 * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][3];
njac[i_imopVar3][j_imopVar4][k_imopVar5][3][1] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][3][2] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][3][3] = c3c4 * tmp1;
njac[i_imopVar3][j_imopVar4][k_imopVar5][3][4] = 0.0;
njac[i_imopVar3][j_imopVar4][k_imopVar5][4][0] = -(c3c4 - c1345) * tmp3 * (((u[i_imopVar3][j_imopVar4][k_imopVar5][1]) * (u[i_imopVar3][j_imopVar4][k_imopVar5][1]))) - (con43 * c3c4 - c1345) * tmp3 * (((u[i_imopVar3][j_imopVar4][k_imopVar5][2]) * (u[i_imopVar3][j_imopVar4][k_imopVar5][2]))) - (c3c4 - c1345) * tmp3 * (((u[i_imopVar3][j_imopVar4][k_imopVar5][3]) * (u[i_imopVar3][j_imopVar4][k_imopVar5][3]))) - c1345 * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][4];
njac[i_imopVar3][j_imopVar4][k_imopVar5][4][1] = (c3c4 - c1345) * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][1];
njac[i_imopVar3][j_imopVar4][k_imopVar5][4][2] = (con43 * c3c4 - c1345) * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][2];
njac[i_imopVar3][j_imopVar4][k_imopVar5][4][3] = (c3c4 - c1345) * tmp2 * u[i_imopVar3][j_imopVar4][k_imopVar5][3];
njac[i_imopVar3][j_imopVar4][k_imopVar5][4][4] = c1345 * tmp1;
}
}
}
// #pragma omp dummyFlush BARRIER_START written([fjac.f, tmp3, tmp1, tmp2, njac.f]) read([fjac.f, dt, fjac, tmp1, grid_points.f, tmp2, grid_points, dy5, i_imopVar3, njac, njac.f, ty2, ty1, lhs, dy2, lhs.f, dy1, dy4, dy3])
#pragma omp barrier
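// Assemble the block-tridiagonal system along j.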
#pragma omp for nowait
for (i_imopVar3 = 1; i_imopVar3 < grid_points[0] - 1; i_imopVar3++) {
for (j_imopVar4 = 1; j_imopVar4 < grid_points[1] - 1; j_imopVar4++) {
for (k_imopVar5 = 1; k_imopVar5 < grid_points[2] - 1; k_imopVar5++) {
tmp1 = dt * ty1;
tmp2 = dt * ty2;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][0][0] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][0] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][0] - tmp1 * dy1;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][0][1] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][1] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][0][2] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][2] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][0][3] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][3] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][0][4] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][4] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][0][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][1][0] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][0] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][1][1] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][1] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][1] - tmp1 * dy2;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][1][2] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][2] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][1][3] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][3] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][1][4] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][4] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][1][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][2][0] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][0] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][2][1] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][1] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][2][2] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][2] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][2] - tmp1 * dy3;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][2][3] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][3] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][2][4] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][4] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][2][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][3][0] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][0] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][3][1] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][1] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][3][2] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][2] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][3][3] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][3] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][3] - tmp1 * dy4;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][3][4] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][4] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][3][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][4][0] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][0] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][4][1] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][1] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][4][2] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][2] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][4][3] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][3] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][0][4][4] = -tmp2 * fjac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][4] - tmp1 * njac[i_imopVar3][j_imopVar4 - 1][k_imopVar5][4][4] - tmp1 * dy5;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][0][0] + tmp1 * 2.0 * dy1;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][0][1] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][0][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][0][2] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][0][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][0][3] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][0][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][0][4] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][0][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][1][0] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][1][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][1][1] + tmp1 * 2.0 * dy2;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][1][2] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][1][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][1][3] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][1][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][1][4] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][1][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][2][0] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][2][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][2][1] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][2][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][2][2] + tmp1 * 2.0 * dy3;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][2][3] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][2][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][2][4] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][2][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][3][0] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][3][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][3][1] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][3][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][3][2] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][3][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][3][3] + tmp1 * 2.0 * dy4;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][3][4] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][3][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][4][0] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][4][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][4][1] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][4][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][4][2] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][4][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][4][3] = tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][4][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i_imopVar3][j_imopVar4][k_imopVar5][4][4] + tmp1 * 2.0 * dy5;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][0][0] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][0] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][0] - tmp1 * dy1;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][0][1] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][1] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][0][2] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][2] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][0][3] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][3] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][0][4] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][4] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][0][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][1][0] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][0] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][1][1] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][1] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][1] - tmp1 * dy2;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][1][2] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][2] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][1][3] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][3] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][1][4] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][4] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][1][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][2][0] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][0] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][2][1] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][1] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][2][2] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][2] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][2] - tmp1 * dy3;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][2][3] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][3] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][2][4] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][4] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][2][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][3][0] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][0] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][3][1] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][1] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][3][2] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][2] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][3][3] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][3] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][3] - tmp1 * dy4;
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][3][4] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][4] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][3][4];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][4][0] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][0] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][0];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][4][1] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][1] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][1];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][4][2] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][2] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][2];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][4][3] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][3] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][3];
lhs[i_imopVar3][j_imopVar4][k_imopVar5][2][4][4] = tmp2 * fjac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][4] - tmp1 * njac[i_imopVar3][j_imopVar4 + 1][k_imopVar5][4][4] - tmp1 * dy5;
}
}
}
// #pragma omp dummyFlush BARRIER_START written([tmp1, tmp2, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, i_imopVar0, lhs, lhs.f, binvcrhs])
#pragma omp barrier
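// Forward elimination along j.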
int i_imopVar0;
int j_imopVar1;
int k_imopVar2;
int jsize;
jsize = grid_points[1] - 1;
#pragma omp for nowait
for (i_imopVar0 = 1; i_imopVar0 < grid_points[0] - 1; i_imopVar0++) {
for (k_imopVar2 = 1; k_imopVar2 < grid_points[2] - 1; k_imopVar2++) {
double ( *_imopVarPre378 );
double ( *_imopVarPre379 )[5];
double ( *_imopVarPre380 )[5];
_imopVarPre378 = rhs[i_imopVar0][0][k_imopVar2];
_imopVarPre379 = lhs[i_imopVar0][0][k_imopVar2][2];
_imopVarPre380 = lhs[i_imopVar0][0][k_imopVar2][1];
binvcrhs(_imopVarPre380, _imopVarPre379, _imopVarPre378);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, i_imopVar0, grid_points, lhs, binvrhs, matvec_sub, lhs.f, binvcrhs, matmul_sub])
#pragma omp barrier
for (j_imopVar1 = 1; j_imopVar1 < jsize; j_imopVar1++) {
#pragma omp for nowait
for (i_imopVar0 = 1; i_imopVar0 < grid_points[0] - 1; i_imopVar0++) {
for (k_imopVar2 = 1; k_imopVar2 < grid_points[2] - 1; k_imopVar2++) {
double ( *_imopVarPre384 );
double ( *_imopVarPre385 );
double ( *_imopVarPre386 )[5];
_imopVarPre384 = rhs[i_imopVar0][j_imopVar1][k_imopVar2];
_imopVarPre385 = rhs[i_imopVar0][j_imopVar1 - 1][k_imopVar2];
_imopVarPre386 = lhs[i_imopVar0][j_imopVar1][k_imopVar2][0];
matvec_sub(_imopVarPre386, _imopVarPre385, _imopVarPre384);
double ( *_imopVarPre390 )[5];
double ( *_imopVarPre391 )[5];
double ( *_imopVarPre392 )[5];
_imopVarPre390 = lhs[i_imopVar0][j_imopVar1][k_imopVar2][1];
_imopVarPre391 = lhs[i_imopVar0][j_imopVar1 - 1][k_imopVar2][2];
_imopVarPre392 = lhs[i_imopVar0][j_imopVar1][k_imopVar2][0];
matmul_sub(_imopVarPre392, _imopVarPre391, _imopVarPre390);
double ( *_imopVarPre396 );
double ( *_imopVarPre397 )[5];
double ( *_imopVarPre398 )[5];
_imopVarPre396 = rhs[i_imopVar0][j_imopVar1][k_imopVar2];
_imopVarPre397 = lhs[i_imopVar0][j_imopVar1][k_imopVar2][2];
_imopVarPre398 = lhs[i_imopVar0][j_imopVar1][k_imopVar2][1];
binvcrhs(_imopVarPre398, _imopVarPre397, _imopVarPre396);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, i_imopVar0, grid_points, lhs, binvrhs, matvec_sub, lhs.f, binvcrhs, matmul_sub])
#pragma omp barrier
}
#pragma omp for nowait
for (i_imopVar0 = 1; i_imopVar0 < grid_points[0] - 1; i_imopVar0++) {
for (k_imopVar2 = 1; k_imopVar2 < grid_points[2] - 1; k_imopVar2++) {
double ( *_imopVarPre402 );
double ( *_imopVarPre403 );
double ( *_imopVarPre404 )[5];
_imopVarPre402 = rhs[i_imopVar0][jsize][k_imopVar2];
_imopVarPre403 = rhs[i_imopVar0][jsize - 1][k_imopVar2];
_imopVarPre404 = lhs[i_imopVar0][jsize][k_imopVar2][0];
matvec_sub(_imopVarPre404, _imopVarPre403, _imopVarPre402);
double ( *_imopVarPre408 )[5];
double ( *_imopVarPre409 )[5];
double ( *_imopVarPre410 )[5];
_imopVarPre408 = lhs[i_imopVar0][jsize][k_imopVar2][1];
_imopVarPre409 = lhs[i_imopVar0][jsize - 1][k_imopVar2][2];
_imopVarPre410 = lhs[i_imopVar0][jsize][k_imopVar2][0];
matmul_sub(_imopVarPre410, _imopVarPre409, _imopVarPre408);
double ( *_imopVarPre413 );
double ( *_imopVarPre414 )[5];
_imopVarPre413 = rhs[i_imopVar0][jsize][k_imopVar2];
_imopVarPre414 = lhs[i_imopVar0][jsize][k_imopVar2][1];
binvrhs(_imopVarPre414, _imopVarPre413);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, i, grid_points.f, grid_points, lhs, lhs.f])
#pragma omp barrier
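// Back substitution along j.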
int i;
int j;
int k;
int m;
int n;
for (j = grid_points[1] - 2; j >= 0; j--) {
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
for (n = 0; n < 5; n++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][2][m][n] * rhs[i][j + 1][k][n];
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, rhs, i, grid_points.f, grid_points, lhs, lhs.f])
#pragma omp barrier
}
}
// #pragma omp dummyFlush BARRIER_START written([]) read([u, fjac.f, u.f, c4, tmp3, fjac, c3, tmp1, grid_points.f, tmp2, grid_points, c3c4, njac, njac.f, con43, i, c1345, c2, c1])
#pragma omp barrier
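// z sweep: Jacobians for the zeta direction.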
{
int i;
int j;
int k;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 0; k < grid_points[2]; k++) {
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[i][j][k][0][0] = 0.0;
fjac[i][j][k][0][1] = 0.0;
fjac[i][j][k][0][2] = 0.0;
fjac[i][j][k][0][3] = 1.0;
fjac[i][j][k][0][4] = 0.0;
fjac[i][j][k][1][0] = -(u[i][j][k][1] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][1][2] = 0.0;
fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = -(u[i][j][k][2] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][2][1] = 0.0;
fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = -(u[i][j][k][3] * u[i][j][k][3] * tmp2) + 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2);
fjac[i][j][k][3][1] = -c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][3][2] = -c2 * u[i][j][k][2] * tmp1;
fjac[i][j][k][3][3] = (2.0 - c2) * u[i][j][k][3] * tmp1;
fjac[i][j][k][3][4] = c2;
fjac[i][j][k][4][0] = (c2 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * tmp2 - c1 * (u[i][j][k][4] * tmp1)) * (u[i][j][k][3] * tmp1);
fjac[i][j][k][4][1] = -c2 * (u[i][j][k][1] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][4][2] = -c2 * (u[i][j][k][2] * u[i][j][k][3]) * tmp2;
fjac[i][j][k][4][3] = c1 * (u[i][j][k][4] * tmp1) - 0.50 * c2 * ((u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + 3.0 * u[i][j][k][3] * u[i][j][k][3]) * tmp2);
fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = -c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = -c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = -con43 * c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = -(c3c4 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c3c4 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (con43 * c3c4 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = (c3c4 - c1345) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = (c3c4 - c1345) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = (con43 * c3c4 - c1345) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = c1345 * tmp1;
}
}
}
// #pragma omp dummyFlush BARRIER_START written([fjac.f, tmp3, tmp1, tmp2, njac.f]) read([fjac.f, dt, fjac, grid_points.f, tmp1, grid_points, tmp2, njac, njac.f, tz2, lhs, i, tz1, dz3, lhs.f, dz2, dz5, dz4, dz1])
#pragma omp barrier
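// Assemble the block-tridiagonal system along k.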
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
tmp1 = dt * tz1;
tmp2 = dt * tz2;
lhs[i][j][k][0][0][0] = -tmp2 * fjac[i][j][k - 1][0][0] - tmp1 * njac[i][j][k - 1][0][0] - tmp1 * dz1;
lhs[i][j][k][0][0][1] = -tmp2 * fjac[i][j][k - 1][0][1] - tmp1 * njac[i][j][k - 1][0][1];
lhs[i][j][k][0][0][2] = -tmp2 * fjac[i][j][k - 1][0][2] - tmp1 * njac[i][j][k - 1][0][2];
lhs[i][j][k][0][0][3] = -tmp2 * fjac[i][j][k - 1][0][3] - tmp1 * njac[i][j][k - 1][0][3];
lhs[i][j][k][0][0][4] = -tmp2 * fjac[i][j][k - 1][0][4] - tmp1 * njac[i][j][k - 1][0][4];
lhs[i][j][k][0][1][0] = -tmp2 * fjac[i][j][k - 1][1][0] - tmp1 * njac[i][j][k - 1][1][0];
lhs[i][j][k][0][1][1] = -tmp2 * fjac[i][j][k - 1][1][1] - tmp1 * njac[i][j][k - 1][1][1] - tmp1 * dz2;
lhs[i][j][k][0][1][2] = -tmp2 * fjac[i][j][k - 1][1][2] - tmp1 * njac[i][j][k - 1][1][2];
lhs[i][j][k][0][1][3] = -tmp2 * fjac[i][j][k - 1][1][3] - tmp1 * njac[i][j][k - 1][1][3];
lhs[i][j][k][0][1][4] = -tmp2 * fjac[i][j][k - 1][1][4] - tmp1 * njac[i][j][k - 1][1][4];
lhs[i][j][k][0][2][0] = -tmp2 * fjac[i][j][k - 1][2][0] - tmp1 * njac[i][j][k - 1][2][0];
lhs[i][j][k][0][2][1] = -tmp2 * fjac[i][j][k - 1][2][1] - tmp1 * njac[i][j][k - 1][2][1];
lhs[i][j][k][0][2][2] = -tmp2 * fjac[i][j][k - 1][2][2] - tmp1 * njac[i][j][k - 1][2][2] - tmp1 * dz3;
lhs[i][j][k][0][2][3] = -tmp2 * fjac[i][j][k - 1][2][3] - tmp1 * njac[i][j][k - 1][2][3];
lhs[i][j][k][0][2][4] = -tmp2 * fjac[i][j][k - 1][2][4] - tmp1 * njac[i][j][k - 1][2][4];
lhs[i][j][k][0][3][0] = -tmp2 * fjac[i][j][k - 1][3][0] - tmp1 * njac[i][j][k - 1][3][0];
lhs[i][j][k][0][3][1] = -tmp2 * fjac[i][j][k - 1][3][1] - tmp1 * njac[i][j][k - 1][3][1];
lhs[i][j][k][0][3][2] = -tmp2 * fjac[i][j][k - 1][3][2] - tmp1 * njac[i][j][k - 1][3][2];
lhs[i][j][k][0][3][3] = -tmp2 * fjac[i][j][k - 1][3][3] - tmp1 * njac[i][j][k - 1][3][3] - tmp1 * dz4;
lhs[i][j][k][0][3][4] = -tmp2 * fjac[i][j][k - 1][3][4] - tmp1 * njac[i][j][k - 1][3][4];
lhs[i][j][k][0][4][0] = -tmp2 * fjac[i][j][k - 1][4][0] - tmp1 * njac[i][j][k - 1][4][0];
lhs[i][j][k][0][4][1] = -tmp2 * fjac[i][j][k - 1][4][1] - tmp1 * njac[i][j][k - 1][4][1];
lhs[i][j][k][0][4][2] = -tmp2 * fjac[i][j][k - 1][4][2] - tmp1 * njac[i][j][k - 1][4][2];
lhs[i][j][k][0][4][3] = -tmp2 * fjac[i][j][k - 1][4][3] - tmp1 * njac[i][j][k - 1][4][3];
lhs[i][j][k][0][4][4] = -tmp2 * fjac[i][j][k - 1][4][4] - tmp1 * njac[i][j][k - 1][4][4] - tmp1 * dz5;
lhs[i][j][k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dz1;
lhs[i][j][k][1][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][1][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][1][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][1][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][1][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dz2;
lhs[i][j][k][1][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][1][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][1][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][1][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][1][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dz3;
lhs[i][j][k][1][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][1][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][1][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][1][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][1][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dz4;
lhs[i][j][k][1][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][1][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][1][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][1][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][1][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dz5;
lhs[i][j][k][2][0][0] = tmp2 * fjac[i][j][k + 1][0][0] - tmp1 * njac[i][j][k + 1][0][0] - tmp1 * dz1;
lhs[i][j][k][2][0][1] = tmp2 * fjac[i][j][k + 1][0][1] - tmp1 * njac[i][j][k + 1][0][1];
lhs[i][j][k][2][0][2] = tmp2 * fjac[i][j][k + 1][0][2] - tmp1 * njac[i][j][k + 1][0][2];
lhs[i][j][k][2][0][3] = tmp2 * fjac[i][j][k + 1][0][3] - tmp1 * njac[i][j][k + 1][0][3];
lhs[i][j][k][2][0][4] = tmp2 * fjac[i][j][k + 1][0][4] - tmp1 * njac[i][j][k + 1][0][4];
lhs[i][j][k][2][1][0] = tmp2 * fjac[i][j][k + 1][1][0] - tmp1 * njac[i][j][k + 1][1][0];
lhs[i][j][k][2][1][1] = tmp2 * fjac[i][j][k + 1][1][1] - tmp1 * njac[i][j][k + 1][1][1] - tmp1 * dz2;
lhs[i][j][k][2][1][2] = tmp2 * fjac[i][j][k + 1][1][2] - tmp1 * njac[i][j][k + 1][1][2];
lhs[i][j][k][2][1][3] = tmp2 * fjac[i][j][k + 1][1][3] - tmp1 * njac[i][j][k + 1][1][3];
lhs[i][j][k][2][1][4] = tmp2 * fjac[i][j][k + 1][1][4] - tmp1 * njac[i][j][k + 1][1][4];
lhs[i][j][k][2][2][0] = tmp2 * fjac[i][j][k + 1][2][0] - tmp1 * njac[i][j][k + 1][2][0];
lhs[i][j][k][2][2][1] = tmp2 * fjac[i][j][k + 1][2][1] - tmp1 * njac[i][j][k + 1][2][1];
lhs[i][j][k][2][2][2] = tmp2 * fjac[i][j][k + 1][2][2] - tmp1 * njac[i][j][k + 1][2][2] - tmp1 * dz3;
lhs[i][j][k][2][2][3] = tmp2 * fjac[i][j][k + 1][2][3] - tmp1 * njac[i][j][k + 1][2][3];
lhs[i][j][k][2][2][4] = tmp2 * fjac[i][j][k + 1][2][4] - tmp1 * njac[i][j][k + 1][2][4];
lhs[i][j][k][2][3][0] = tmp2 * fjac[i][j][k + 1][3][0] - tmp1 * njac[i][j][k + 1][3][0];
lhs[i][j][k][2][3][1] = tmp2 * fjac[i][j][k + 1][3][1] - tmp1 * njac[i][j][k + 1][3][1];
lhs[i][j][k][2][3][2] = tmp2 * fjac[i][j][k + 1][3][2] - tmp1 * njac[i][j][k + 1][3][2];
lhs[i][j][k][2][3][3] = tmp2 * fjac[i][j][k + 1][3][3] - tmp1 * njac[i][j][k + 1][3][3] - tmp1 * dz4;
lhs[i][j][k][2][3][4] = tmp2 * fjac[i][j][k + 1][3][4] - tmp1 * njac[i][j][k + 1][3][4];
lhs[i][j][k][2][4][0] = tmp2 * fjac[i][j][k + 1][4][0] - tmp1 * njac[i][j][k + 1][4][0];
lhs[i][j][k][2][4][1] = tmp2 * fjac[i][j][k + 1][4][1] - tmp1 * njac[i][j][k + 1][4][1];
lhs[i][j][k][2][4][2] = tmp2 * fjac[i][j][k + 1][4][2] - tmp1 * njac[i][j][k + 1][4][2];
lhs[i][j][k][2][4][3] = tmp2 * fjac[i][j][k + 1][4][3] - tmp1 * njac[i][j][k + 1][4][3];
lhs[i][j][k][2][4][4] = tmp2 * fjac[i][j][k + 1][4][4] - tmp1 * njac[i][j][k + 1][4][4] - tmp1 * dz5;
}
}
}
// #pragma omp dummyFlush BARRIER_START written([tmp1, tmp2, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, i_imopVar15, lhs.f, binvcrhs])
#pragma omp barrier
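// Forward elimination along k.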
int i_imopVar15;
int j_imopVar16;
int k_imopVar17;
int ksize;
ksize = grid_points[2] - 1;
#pragma omp for nowait
for (i_imopVar15 = 1; i_imopVar15 < grid_points[0] - 1; i_imopVar15++) {
for (j_imopVar16 = 1; j_imopVar16 < grid_points[1] - 1; j_imopVar16++) {
double ( *_imopVarPre418 );
double ( *_imopVarPre419 )[5];
double ( *_imopVarPre420 )[5];
_imopVarPre418 = rhs[i_imopVar15][j_imopVar16][0];
_imopVarPre419 = lhs[i_imopVar15][j_imopVar16][0][2];
_imopVarPre420 = lhs[i_imopVar15][j_imopVar16][0][1];
binvcrhs(_imopVarPre420, _imopVarPre419, _imopVarPre418);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, binvrhs, matvec_sub, i_imopVar15, lhs.f, matmul_sub, binvcrhs])
#pragma omp barrier
for (k_imopVar17 = 1; k_imopVar17 < ksize; k_imopVar17++) {
#pragma omp for nowait
for (i_imopVar15 = 1; i_imopVar15 < grid_points[0] - 1; i_imopVar15++) {
for (j_imopVar16 = 1; j_imopVar16 < grid_points[1] - 1; j_imopVar16++) {
double ( *_imopVarPre424 );
double ( *_imopVarPre425 );
double ( *_imopVarPre426 )[5];
_imopVarPre424 = rhs[i_imopVar15][j_imopVar16][k_imopVar17];
_imopVarPre425 = rhs[i_imopVar15][j_imopVar16][k_imopVar17 - 1];
_imopVarPre426 = lhs[i_imopVar15][j_imopVar16][k_imopVar17][0];
matvec_sub(_imopVarPre426, _imopVarPre425, _imopVarPre424);
double ( *_imopVarPre430 )[5];
double ( *_imopVarPre431 )[5];
double ( *_imopVarPre432 )[5];
_imopVarPre430 = lhs[i_imopVar15][j_imopVar16][k_imopVar17][1];
_imopVarPre431 = lhs[i_imopVar15][j_imopVar16][k_imopVar17 - 1][2];
_imopVarPre432 = lhs[i_imopVar15][j_imopVar16][k_imopVar17][0];
matmul_sub(_imopVarPre432, _imopVarPre431, _imopVarPre430);
double ( *_imopVarPre436 );
double ( *_imopVarPre437 )[5];
double ( *_imopVarPre438 )[5];
_imopVarPre436 = rhs[i_imopVar15][j_imopVar16][k_imopVar17];
_imopVarPre437 = lhs[i_imopVar15][j_imopVar16][k_imopVar17][2];
_imopVarPre438 = lhs[i_imopVar15][j_imopVar16][k_imopVar17][1];
binvcrhs(_imopVarPre438, _imopVarPre437, _imopVarPre436);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, binvrhs, matvec_sub, i_imopVar15, lhs.f, matmul_sub, binvcrhs])
#pragma omp barrier
}
#pragma omp for nowait
for (i_imopVar15 = 1; i_imopVar15 < grid_points[0] - 1; i_imopVar15++) {
for (j_imopVar16 = 1; j_imopVar16 < grid_points[1] - 1; j_imopVar16++) {
double ( *_imopVarPre442 );
double ( *_imopVarPre443 );
double ( *_imopVarPre444 )[5];
_imopVarPre442 = rhs[i_imopVar15][j_imopVar16][ksize];
_imopVarPre443 = rhs[i_imopVar15][j_imopVar16][ksize - 1];
_imopVarPre444 = lhs[i_imopVar15][j_imopVar16][ksize][0];
matvec_sub(_imopVarPre444, _imopVarPre443, _imopVarPre442);
double ( *_imopVarPre448 )[5];
double ( *_imopVarPre449 )[5];
double ( *_imopVarPre450 )[5];
_imopVarPre448 = lhs[i_imopVar15][j_imopVar16][ksize][1];
_imopVarPre449 = lhs[i_imopVar15][j_imopVar16][ksize - 1][2];
_imopVarPre450 = lhs[i_imopVar15][j_imopVar16][ksize][0];
matmul_sub(_imopVarPre450, _imopVarPre449, _imopVarPre448);
double ( *_imopVarPre453 );
double ( *_imopVarPre454 )[5];
_imopVarPre453 = rhs[i_imopVar15][j_imopVar16][ksize];
_imopVarPre454 = lhs[i_imopVar15][j_imopVar16][ksize][1];
binvrhs(_imopVarPre454, _imopVarPre453);
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, grid_points.f, grid_points, lhs, lhs.f, i_imopVar12])
#pragma omp barrier
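// Back substitution along k; the k recursion is carried within each (i, j)
// column, so the outer i loop can be a parallel worksharing loop.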
int i_imopVar12;
int j_imopVar13;
int k_imopVar14;
int m;
int n;
#pragma omp for nowait
for (i_imopVar12 = 1; i_imopVar12 < grid_points[0] - 1; i_imopVar12++) {
for (j_imopVar13 = 1; j_imopVar13 < grid_points[1] - 1; j_imopVar13++) {
for (k_imopVar14 = grid_points[2] - 2; k_imopVar14 >= 0; k_imopVar14--) {
for (m = 0; m < 5; m++) {
for (n = 0; n < 5; n++) {
rhs[i_imopVar12][j_imopVar13][k_imopVar14][m] = rhs[i_imopVar12][j_imopVar13][k_imopVar14][m] - lhs[i_imopVar12][j_imopVar13][k_imopVar14][2][m][n] * rhs[i_imopVar12][j_imopVar13][k_imopVar14 + 1][n];
}
}
}
}
}
// #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([])
#pragma omp barrier
}
// #pragma omp dummyFlush BARRIER_START written([]) read([xcr.f, xxcon5, dy3ty1, vs.f, _imopVarPre270, yycon3, ce.f, dy1ty1, _imopVarPre276, nullCell, u, dy5ty1, xxcon4, us, dz4tz1, qs, ty2, yycon4, niter, dnxm1, dx4tx1, u.f, i, xce.f, grid_points.f, j, sqrt, xxcon3, square.f, us.f, _imopVarPre292, dz2tz1, qs.f, timer_read, _imopVarPre282, yycon5, zzcon3, i, rho_i, xce, dt, grid_points, dx2tx1, square, i, xxcon2, &verified, _imopVarPre293, tz2, zzcon2, rho_i.f, dy2ty1, rhs.f, &class, printf, _imopVarPre294, forcing.f, _imopVarPre288, zzcon5, _imopVarPre280, dy4ty1, dnym1, c_print_results, _imopVarPre176, fabs, dx1tx1, ws, _imopVarPre281, dssp, xce.f, zzcon4, dz5tz1, _imopVarPre269, c1, _imopVarPre175, _imopVarPre268, add, rhs_norm, ws.f, timer_stop, _imopVarPre286, dx5tx1, xcr.f, error_norm, _imopVarPre274, c2, dz3tz1, rhs, step, exact_solution, vs, yycon2, ce, _imopVarPre172, forcing, tx2, _imopVarPre287, con43, dx3tx1, _imopVarPre275, dnzm1, dz1tz1])
#pragma omp barrier
add();
}
}
double vp1;
double vm1;
double wijk;
double wp1;
double wm1;
int m;
int i;
int j;
int k;
double rho_inv;
double uijk;
double up1;
double um1;
double vijk;
#pragma omp parallel
{
#pragma omp master
{
timer_stop(1);
tmax = timer_read(1);
_imopVarPre175 = &verified;
_imopVarPre176 = &class;
no_time_steps = niter;
class_imopVar19 = _imopVarPre176;
verified_imopVar20 = _imopVarPre175;
epsilon = 1.0e-08;
error_norm(xce);
}
}
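// Editorial note: the loops below recompute the auxiliary fields read
// by the flux and dissipation stencils -- reciprocal density (rho_i),
// velocity components (us, vs, ws), and the kinetic-energy terms
// (square, qs).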
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
rho_inv = 1.0 / u[i][j][k][0];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[i][j][k][1] * rho_inv;
vs[i][j][k] = u[i][j][k][2] * rho_inv;
ws[i][j][k] = u[i][j][k][3] * rho_inv;
square[i][j][k] = 0.5 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
}
}
}
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = forcing[i][j][k][m_imopVar18];
}
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
uijk = us[i][j][k];
up1 = us[i + 1][j][k];
um1 = us[i - 1][j][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * (u[i + 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i - 1][j][k][0]) - tx2 * (u[i + 1][j][k][1] - u[i - 1][j][k][1]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * (u[i + 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i - 1][j][k][1]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[i + 1][j][k][1] * up1 - u[i - 1][j][k][1] * um1 + (u[i + 1][j][k][4] - square[i + 1][j][k] - u[i - 1][j][k][4] + square[i - 1][j][k]) * c2);
rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * (u[i + 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i - 1][j][k][2]) + xxcon2 * (vs[i + 1][j][k] - 2.0 * vs[i][j][k] + vs[i - 1][j][k]) - tx2 * (u[i + 1][j][k][2] * up1 - u[i - 1][j][k][2] * um1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * (u[i + 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i - 1][j][k][3]) + xxcon2 * (ws[i + 1][j][k] - 2.0 * ws[i][j][k] + ws[i - 1][j][k]) - tx2 * (u[i + 1][j][k][3] * up1 - u[i - 1][j][k][3] * um1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * (u[i + 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i - 1][j][k][4]) + xxcon3 * (qs[i + 1][j][k] - 2.0 * qs[i][j][k] + qs[i - 1][j][k]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[i + 1][j][k][4] * rho_i[i + 1][j][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i - 1][j][k][4] * rho_i[i - 1][j][k]) - tx2 * ((c1 * u[i + 1][j][k][4] - c2 * square[i + 1][j][k]) * up1 - (c1 * u[i - 1][j][k][4] - c2 * square[i - 1][j][k]) * um1);
}
}
}
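// Editorial note: the next five loops add fourth-order artificial
// dissipation in the xi direction, with narrowed stencils at the
// planes i = 1, 2, grid_points[0]-3, and grid_points[0]-2; the same
// five-stanza pattern repeats below for the eta and zeta directions.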
i = 1;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (5.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i + 1][j][k][m_imopVar18] + u[i + 2][j][k][m_imopVar18]);
}
}
}
i = 2;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (-4.0 * u[i - 1][j][k][m_imopVar18] + 6.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i + 1][j][k][m_imopVar18] + u[i + 2][j][k][m_imopVar18]);
}
}
}
#pragma omp for nowait
for (i = 3; i < grid_points[0] - 3; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (u[i - 2][j][k][m_imopVar18] - 4.0 * u[i - 1][j][k][m_imopVar18] + 6.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i + 1][j][k][m_imopVar18] + u[i + 2][j][k][m_imopVar18]);
}
}
}
}
i = grid_points[0] - 3;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (u[i - 2][j][k][m_imopVar18] - 4.0 * u[i - 1][j][k][m_imopVar18] + 6.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i + 1][j][k][m_imopVar18]);
}
}
}
i = grid_points[0] - 2;
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (u[i - 2][j][k][m_imopVar18] - 4.0 * u[i - 1][j][k][m_imopVar18] + 5.0 * u[i][j][k][m_imopVar18]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
vijk = vs[i][j][k];
vp1 = vs[i][j + 1][k];
vm1 = vs[i][j - 1][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * (u[i][j + 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j - 1][k][0]) - ty2 * (u[i][j + 1][k][2] - u[i][j - 1][k][2]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * (u[i][j + 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j - 1][k][1]) + yycon2 * (us[i][j + 1][k] - 2.0 * us[i][j][k] + us[i][j - 1][k]) - ty2 * (u[i][j + 1][k][1] * vp1 - u[i][j - 1][k][1] * vm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * (u[i][j + 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j - 1][k][2]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[i][j + 1][k][2] * vp1 - u[i][j - 1][k][2] * vm1 + (u[i][j + 1][k][4] - square[i][j + 1][k] - u[i][j - 1][k][4] + square[i][j - 1][k]) * c2);
rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * (u[i][j + 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j - 1][k][3]) + yycon2 * (ws[i][j + 1][k] - 2.0 * ws[i][j][k] + ws[i][j - 1][k]) - ty2 * (u[i][j + 1][k][3] * vp1 - u[i][j - 1][k][3] * vm1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * (u[i][j + 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j - 1][k][4]) + yycon3 * (qs[i][j + 1][k] - 2.0 * qs[i][j][k] + qs[i][j - 1][k]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[i][j + 1][k][4] * rho_i[i][j + 1][k] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j - 1][k][4] * rho_i[i][j - 1][k]) - ty2 * ((c1 * u[i][j + 1][k][4] - c2 * square[i][j + 1][k]) * vp1 - (c1 * u[i][j - 1][k][4] - c2 * square[i][j - 1][k]) * vm1);
}
}
}
j = 1;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (5.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i][j + 1][k][m_imopVar18] + u[i][j + 2][k][m_imopVar18]);
}
}
}
j = 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (-4.0 * u[i][j - 1][k][m_imopVar18] + 6.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i][j + 1][k][m_imopVar18] + u[i][j + 2][k][m_imopVar18]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 3; j < grid_points[1] - 3; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (u[i][j - 2][k][m_imopVar18] - 4.0 * u[i][j - 1][k][m_imopVar18] + 6.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i][j + 1][k][m_imopVar18] + u[i][j + 2][k][m_imopVar18]);
}
}
}
}
j = grid_points[1] - 3;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (u[i][j - 2][k][m_imopVar18] - 4.0 * u[i][j - 1][k][m_imopVar18] + 6.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i][j + 1][k][m_imopVar18]);
}
}
}
j = grid_points[1] - 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (u[i][j - 2][k][m_imopVar18] - 4.0 * u[i][j - 1][k][m_imopVar18] + 5.0 * u[i][j][k][m_imopVar18]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
wijk = ws[i][j][k];
wp1 = ws[i][j][k + 1];
wm1 = ws[i][j][k - 1];
rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * (u[i][j][k + 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k - 1][0]) - tz2 * (u[i][j][k + 1][3] - u[i][j][k - 1][3]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * (u[i][j][k + 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k - 1][1]) + zzcon2 * (us[i][j][k + 1] - 2.0 * us[i][j][k] + us[i][j][k - 1]) - tz2 * (u[i][j][k + 1][1] * wp1 - u[i][j][k - 1][1] * wm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * (u[i][j][k + 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k - 1][2]) + zzcon2 * (vs[i][j][k + 1] - 2.0 * vs[i][j][k] + vs[i][j][k - 1]) - tz2 * (u[i][j][k + 1][2] * wp1 - u[i][j][k - 1][2] * wm1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * (u[i][j][k + 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k - 1][3]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[i][j][k + 1][3] * wp1 - u[i][j][k - 1][3] * wm1 + (u[i][j][k + 1][4] - square[i][j][k + 1] - u[i][j][k - 1][4] + square[i][j][k - 1]) * c2);
rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * (u[i][j][k + 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k - 1][4]) + zzcon3 * (qs[i][j][k + 1] - 2.0 * qs[i][j][k] + qs[i][j][k - 1]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[i][j][k + 1][4] * rho_i[i][j][k + 1] - 2.0 * u[i][j][k][4] * rho_i[i][j][k] + u[i][j][k - 1][4] * rho_i[i][j][k - 1]) - tz2 * ((c1 * u[i][j][k + 1][4] - c2 * square[i][j][k + 1]) * wp1 - (c1 * u[i][j][k - 1][4] - c2 * square[i][j][k - 1]) * wm1);
}
}
}
k = 1;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (5.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i][j][k + 1][m_imopVar18] + u[i][j][k + 2][m_imopVar18]);
}
}
}
k = 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (-4.0 * u[i][j][k - 1][m_imopVar18] + 6.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i][j][k + 1][m_imopVar18] + u[i][j][k + 2][m_imopVar18]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 3; k < grid_points[2] - 3; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (u[i][j][k - 2][m_imopVar18] - 4.0 * u[i][j][k - 1][m_imopVar18] + 6.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i][j][k + 1][m_imopVar18] + u[i][j][k + 2][m_imopVar18]);
}
}
}
}
k = grid_points[2] - 3;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (u[i][j][k - 2][m_imopVar18] - 4.0 * u[i][j][k - 1][m_imopVar18] + 6.0 * u[i][j][k][m_imopVar18] - 4.0 * u[i][j][k + 1][m_imopVar18]);
}
}
}
k = grid_points[2] - 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] - dssp * (u[i][j][k - 2][m_imopVar18] - 4.0 * u[i][j][k - 1][m_imopVar18] + 5.0 * u[i][j][k][m_imopVar18]);
}
}
}
#pragma omp for nowait
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m_imopVar18 = 0; m_imopVar18 < 5; m_imopVar18++) {
for (i = 1; i < grid_points[0] - 1; i++) {
rhs[i][j][k][m_imopVar18] = rhs[i][j][k][m_imopVar18] * dt;
}
}
}
}
rhs_norm(xcr);
for (m = 0; m < 5; m++) {
xcr[m] = xcr[m] / dt;
}
*class_imopVar19 = 'U';
*verified_imopVar20 = 1;
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
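// Editorial note: the nested comparisons below select the benchmark
// class (S, W, A, B, or C) from the grid size and time-step count and
// load the matching reference norms; unrecognized sizes leave the
// class 'U' (unverified).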
int _imopVarPre268;
int _imopVarPre269;
int _imopVarPre270;
_imopVarPre268 = grid_points[0] == 12;
if (_imopVarPre268) {
_imopVarPre269 = grid_points[1] == 12;
if (_imopVarPre269) {
_imopVarPre270 = grid_points[2] == 12;
if (_imopVarPre270) {
_imopVarPre270 = no_time_steps == 60;
}
_imopVarPre269 = _imopVarPre270;
}
_imopVarPre268 = _imopVarPre269;
}
if (_imopVarPre268) {
*class_imopVar19 = 'S';
dtref = 1.0e-2;
xcrref[0] = 1.7034283709541311e-01;
xcrref[1] = 1.2975252070034097e-02;
xcrref[2] = 3.2527926989486055e-02;
xcrref[3] = 2.6436421275166801e-02;
xcrref[4] = 1.9211784131744430e-01;
xceref[0] = 4.9976913345811579e-04;
xceref[1] = 4.5195666782961927e-05;
xceref[2] = 7.3973765172921357e-05;
xceref[3] = 7.3821238632439731e-05;
xceref[4] = 8.9269630987491446e-04;
} else {
int _imopVarPre274;
int _imopVarPre275;
int _imopVarPre276;
_imopVarPre274 = grid_points[0] == 24;
if (_imopVarPre274) {
_imopVarPre275 = grid_points[1] == 24;
if (_imopVarPre275) {
_imopVarPre276 = grid_points[2] == 24;
if (_imopVarPre276) {
_imopVarPre276 = no_time_steps == 200;
}
_imopVarPre275 = _imopVarPre276;
}
_imopVarPre274 = _imopVarPre275;
}
if (_imopVarPre274) {
*class_imopVar19 = 'W';
dtref = 0.8e-3;
xcrref[0] = 0.1125590409344e+03;
xcrref[1] = 0.1180007595731e+02;
xcrref[2] = 0.2710329767846e+02;
xcrref[3] = 0.2469174937669e+02;
xcrref[4] = 0.2638427874317e+03;
xceref[0] = 0.4419655736008e+01;
xceref[1] = 0.4638531260002e+00;
xceref[2] = 0.1011551749967e+01;
xceref[3] = 0.9235878729944e+00;
xceref[4] = 0.1018045837718e+02;
} else {
int _imopVarPre280;
int _imopVarPre281;
int _imopVarPre282;
_imopVarPre280 = grid_points[0] == 64;
if (_imopVarPre280) {
_imopVarPre281 = grid_points[1] == 64;
if (_imopVarPre281) {
_imopVarPre282 = grid_points[2] == 64;
if (_imopVarPre282) {
_imopVarPre282 = no_time_steps == 200;
}
_imopVarPre281 = _imopVarPre282;
}
_imopVarPre280 = _imopVarPre281;
}
if (_imopVarPre280) {
*class_imopVar19 = 'A';
dtref = 0.8e-3;
xcrref[0] = 1.0806346714637264e+02;
xcrref[1] = 1.1319730901220813e+01;
xcrref[2] = 2.5974354511582465e+01;
xcrref[3] = 2.3665622544678910e+01;
xcrref[4] = 2.5278963211748344e+02;
xceref[0] = 4.2348416040525025e+00;
xceref[1] = 4.4390282496995698e-01;
xceref[2] = 9.6692480136345650e-01;
xceref[3] = 8.8302063039765474e-01;
xceref[4] = 9.7379901770829278e+00;
} else {
int _imopVarPre286;
int _imopVarPre287;
int _imopVarPre288;
_imopVarPre286 = grid_points[0] == 102;
if (_imopVarPre286) {
_imopVarPre287 = grid_points[1] == 102;
if (_imopVarPre287) {
_imopVarPre288 = grid_points[2] == 102;
if (_imopVarPre288) {
_imopVarPre288 = no_time_steps == 200;
}
_imopVarPre287 = _imopVarPre288;
}
_imopVarPre286 = _imopVarPre287;
}
if (_imopVarPre286) {
*class_imopVar19 = 'B';
dtref = 3.0e-4;
xcrref[0] = 1.4233597229287254e+03;
xcrref[1] = 9.9330522590150238e+01;
xcrref[2] = 3.5646025644535285e+02;
xcrref[3] = 3.2485447959084092e+02;
xcrref[4] = 3.2707541254659363e+03;
xceref[0] = 5.2969847140936856e+01;
xceref[1] = 4.4632896115670668e+00;
xceref[2] = 1.3122573342210174e+01;
xceref[3] = 1.2006925323559144e+01;
xceref[4] = 1.2459576151035986e+02;
} else {
int _imopVarPre292;
int _imopVarPre293;
int _imopVarPre294;
_imopVarPre292 = grid_points[0] == 162;
if (_imopVarPre292) {
_imopVarPre293 = grid_points[1] == 162;
if (_imopVarPre293) {
_imopVarPre294 = grid_points[2] == 162;
if (_imopVarPre294) {
_imopVarPre294 = no_time_steps == 200;
}
_imopVarPre293 = _imopVarPre294;
}
_imopVarPre292 = _imopVarPre293;
}
if (_imopVarPre292) {
*class_imopVar19 = 'C';
dtref = 1.0e-4;
xcrref[0] = 0.62398116551764615e+04;
xcrref[1] = 0.50793239190423964e+03;
xcrref[2] = 0.15423530093013596e+04;
xcrref[3] = 0.13302387929291190e+04;
xcrref[4] = 0.11604087428436455e+05;
xceref[0] = 0.16462008369091265e+03;
xceref[1] = 0.11497107903824313e+02;
xceref[2] = 0.41207446207461508e+02;
xceref[3] = 0.37087651059694167e+02;
xceref[4] = 0.36211053051841265e+03;
} else {
*verified_imopVar20 = 0;
}
}
}
}
}
for (m = 0; m < 5; m++) {
double _imopVarPre296;
double _imopVarPre297;
_imopVarPre296 = (xcr[m] - xcrref[m]) / xcrref[m];
_imopVarPre297 = fabs(_imopVarPre296);
xcrdif[m] = _imopVarPre297;
double _imopVarPre299;
double _imopVarPre300;
_imopVarPre299 = (xce[m] - xceref[m]) / xceref[m];
_imopVarPre300 = fabs(_imopVarPre299);
xcedif[m] = _imopVarPre300;
}
if (*class_imopVar19 != 'U') {
char _imopVarPre302;
_imopVarPre302 = *class_imopVar19;
printf(" Verification being performed for class %1c\n", _imopVarPre302);
printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
double _imopVarPre305;
double _imopVarPre306;
_imopVarPre305 = dt - dtref;
_imopVarPre306 = fabs(_imopVarPre305);
if (_imopVarPre306 > epsilon) {
*verified_imopVar20 = 0;
*class_imopVar19 = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown class\n");
}
if (*class_imopVar19 != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*class_imopVar19 == 'U') {
double _imopVarPre308;
_imopVarPre308 = xcr[m];
printf(" %2d%20.13e\n", m, _imopVarPre308);
} else {
if (xcrdif[m] > epsilon) {
*verified_imopVar20 = 0;
double _imopVarPre312;
double _imopVarPre313;
double _imopVarPre314;
_imopVarPre312 = xcrdif[m];
_imopVarPre313 = xcrref[m];
_imopVarPre314 = xcr[m];
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre314, _imopVarPre313, _imopVarPre312);
} else {
double _imopVarPre318;
double _imopVarPre319;
double _imopVarPre320;
_imopVarPre318 = xcrdif[m];
_imopVarPre319 = xcrref[m];
_imopVarPre320 = xcr[m];
printf(" %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre320, _imopVarPre319, _imopVarPre318);
}
}
}
if (*class_imopVar19 != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*class_imopVar19 == 'U') {
double _imopVarPre322;
_imopVarPre322 = xce[m];
printf(" %2d%20.13e\n", m, _imopVarPre322);
} else {
if (xcedif[m] > epsilon) {
*verified_imopVar20 = 0;
double _imopVarPre326;
double _imopVarPre327;
double _imopVarPre328;
_imopVarPre326 = xcedif[m];
_imopVarPre327 = xceref[m];
_imopVarPre328 = xce[m];
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre328, _imopVarPre327, _imopVarPre326);
} else {
double _imopVarPre332;
double _imopVarPre333;
double _imopVarPre334;
_imopVarPre332 = xcedif[m];
_imopVarPre333 = xceref[m];
_imopVarPre334 = xce[m];
printf(" %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre334, _imopVarPre333, _imopVarPre332);
}
}
}
if (*class_imopVar19 == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else {
if (*verified_imopVar20 == 1) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
n3 = grid_points[0] * grid_points[1] * grid_points[2];
navg = (grid_points[0] + grid_points[1] + grid_points[2]) / 3.0;
if (tmax != 0.0) {
mflops = 1.0e-6 * (double) niter * (3478.8 * (double) n3 - 17655.7 * (navg * navg) + 28023.7 * navg) / tmax;
} else {
mflops = 0.0;
}
int _imopVarPre180;
int _imopVarPre181;
int _imopVarPre182;
_imopVarPre180 = grid_points[2];
_imopVarPre181 = grid_points[1];
_imopVarPre182 = grid_points[0];
c_print_results("BT", class, _imopVarPre182, _imopVarPre181, _imopVarPre180, niter, nthreads, tmax, mflops, " floating point", verified, "3.0 structured", "15 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "(none)");
}
static void add(void ) {
int i;
int j;
int k;
int m;
#pragma omp for nowait
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];
}
}
}
}
}
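// Editorial note: error_norm accumulates the RMS difference between u
// and the analytic exact_solution over the whole grid; rhs_norm below
// does the same for the residual rhs. Both normalize by the interior
// point count of each dimension before taking the square root.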
static void error_norm(double rms[5]) {
int i;
int j;
int k;
int m;
int d;
double xi;
double eta;
double zeta;
double u_exact[5];
double add;
for (m = 0; m < 5; m++) {
rms[m] = 0.0;
}
for (i = 0; i < grid_points[0]; i++) {
xi = (double) i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double) j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double) k * dnzm1;
exact_solution(xi, eta, zeta, u_exact);
for (m = 0; m < 5; m++) {
add = u[i][j][k][m] - u_exact[m];
rms[m] = rms[m] + add * add;
}
}
}
}
for (m = 0; m < 5; m++) {
for (d = 0; d <= 2; d++) {
rms[m] = rms[m] / (double) (grid_points[d] - 2);
}
double _imopVarPre184;
double _imopVarPre185;
_imopVarPre184 = rms[m];
_imopVarPre185 = sqrt(_imopVarPre184);
rms[m] = _imopVarPre185;
}
}
static void rhs_norm(double rms[5]) {
int i;
int j;
int k;
int d;
int m;
double add;
for (m = 0; m < 5; m++) {
rms[m] = 0.0;
}
for (i = 1; i < grid_points[0] - 1; i++) {
for (j = 1; j < grid_points[1] - 1; j++) {
for (k = 1; k < grid_points[2] - 1; k++) {
for (m = 0; m < 5; m++) {
add = rhs[i][j][k][m];
rms[m] = rms[m] + add * add;
}
}
}
}
for (m = 0; m < 5; m++) {
for (d = 0; d <= 2; d++) {
rms[m] = rms[m] / (double) (grid_points[d] - 2);
}
double _imopVarPre187;
double _imopVarPre188;
_imopVarPre187 = rms[m];
_imopVarPre188 = sqrt(_imopVarPre187);
rms[m] = _imopVarPre188;
}
}
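// Editorial note: a separable quartic polynomial in xi, eta, and zeta,
// evaluated in Horner form from the coefficients ce[m][0..12] that
// set_constants initializes.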
static void exact_solution(double xi, double eta , double zeta , double dtemp[5]) {
int m;
for (m = 0; m < 5; m++) {
dtemp[m] = ce[m][0] + xi * (ce[m][1] + xi * (ce[m][4] + xi * (ce[m][7] + xi * ce[m][10]))) + eta * (ce[m][2] + eta * (ce[m][5] + eta * (ce[m][8] + eta * ce[m][11]))) + zeta * (ce[m][3] + zeta * (ce[m][6] + zeta * (ce[m][9] + zeta * ce[m][12])));
}
}
static void set_constants(void ) {
ce[0][0] = 2.0;
ce[0][1] = 0.0;
ce[0][2] = 0.0;
ce[0][3] = 4.0;
ce[0][4] = 5.0;
ce[0][5] = 3.0;
ce[0][6] = 0.5;
ce[0][7] = 0.02;
ce[0][8] = 0.01;
ce[0][9] = 0.03;
ce[0][10] = 0.5;
ce[0][11] = 0.4;
ce[0][12] = 0.3;
ce[1][0] = 1.0;
ce[1][1] = 0.0;
ce[1][2] = 0.0;
ce[1][3] = 0.0;
ce[1][4] = 1.0;
ce[1][5] = 2.0;
ce[1][6] = 3.0;
ce[1][7] = 0.01;
ce[1][8] = 0.03;
ce[1][9] = 0.02;
ce[1][10] = 0.4;
ce[1][11] = 0.3;
ce[1][12] = 0.5;
ce[2][0] = 2.0;
ce[2][1] = 2.0;
ce[2][2] = 0.0;
ce[2][3] = 0.0;
ce[2][4] = 0.0;
ce[2][5] = 2.0;
ce[2][6] = 3.0;
ce[2][7] = 0.04;
ce[2][8] = 0.03;
ce[2][9] = 0.05;
ce[2][10] = 0.3;
ce[2][11] = 0.5;
ce[2][12] = 0.4;
ce[3][0] = 2.0;
ce[3][1] = 2.0;
ce[3][2] = 0.0;
ce[3][3] = 0.0;
ce[3][4] = 0.0;
ce[3][5] = 2.0;
ce[3][6] = 3.0;
ce[3][7] = 0.03;
ce[3][8] = 0.05;
ce[3][9] = 0.04;
ce[3][10] = 0.2;
ce[3][11] = 0.1;
ce[3][12] = 0.3;
ce[4][0] = 5.0;
ce[4][1] = 4.0;
ce[4][2] = 3.0;
ce[4][3] = 2.0;
ce[4][4] = 0.1;
ce[4][5] = 0.4;
ce[4][6] = 0.3;
ce[4][7] = 0.05;
ce[4][8] = 0.04;
ce[4][9] = 0.03;
ce[4][10] = 0.1;
ce[4][11] = 0.3;
ce[4][12] = 0.2;
c1 = 1.4;
c2 = 0.4;
c3 = 0.1;
c4 = 1.0;
c5 = 1.4;
dnxm1 = 1.0 / (double) (grid_points[0] - 1);
dnym1 = 1.0 / (double) (grid_points[1] - 1);
dnzm1 = 1.0 / (double) (grid_points[2] - 1);
c1c2 = c1 * c2;
c1c5 = c1 * c5;
c3c4 = c3 * c4;
c1345 = c1c5 * c3c4;
conz1 = (1.0 - c1c5);
tx1 = 1.0 / (dnxm1 * dnxm1);
tx2 = 1.0 / (2.0 * dnxm1);
tx3 = 1.0 / dnxm1;
ty1 = 1.0 / (dnym1 * dnym1);
ty2 = 1.0 / (2.0 * dnym1);
ty3 = 1.0 / dnym1;
tz1 = 1.0 / (dnzm1 * dnzm1);
tz2 = 1.0 / (2.0 * dnzm1);
tz3 = 1.0 / dnzm1;
dx1 = 0.75;
dx2 = 0.75;
dx3 = 0.75;
dx4 = 0.75;
dx5 = 0.75;
dy1 = 0.75;
dy2 = 0.75;
dy3 = 0.75;
dy4 = 0.75;
dy5 = 0.75;
dz1 = 1.0;
dz2 = 1.0;
dz3 = 1.0;
dz4 = 1.0;
dz5 = 1.0;
int _imopVarPre203;
double _imopVarPre204;
_imopVarPre203 = (dx3 > dx4);
if (_imopVarPre203) {
_imopVarPre204 = dx3;
} else {
_imopVarPre204 = dx4;
}
dxmax = _imopVarPre204;
int _imopVarPre207;
double _imopVarPre208;
_imopVarPre207 = (dy2 > dy4);
if (_imopVarPre207) {
_imopVarPre208 = dy2;
} else {
_imopVarPre208 = dy4;
}
dymax = _imopVarPre208;
int _imopVarPre211;
double _imopVarPre212;
_imopVarPre211 = (dz2 > dz3);
if (_imopVarPre211) {
_imopVarPre212 = dz2;
} else {
_imopVarPre212 = dz3;
}
dzmax = _imopVarPre212;
int _imopVarPre253;
double _imopVarPre254;
int _imopVarPre255;
double _imopVarPre256;
int _imopVarPre263;
double _imopVarPre264;
_imopVarPre253 = (dy1 > dz1);
if (_imopVarPre253) {
_imopVarPre254 = dy1;
} else {
_imopVarPre254 = dz1;
}
_imopVarPre255 = (dx1 > _imopVarPre254);
if (_imopVarPre255) {
_imopVarPre256 = dx1;
} else {
_imopVarPre263 = (dy1 > dz1);
if (_imopVarPre263) {
_imopVarPre264 = dy1;
} else {
_imopVarPre264 = dz1;
}
_imopVarPre256 = _imopVarPre264;
}
dssp = 0.25 * _imopVarPre256;
c4dssp = 4.0 * dssp;
c5dssp = 5.0 * dssp;
dttx1 = dt * tx1;
dttx2 = dt * tx2;
dtty1 = dt * ty1;
dtty2 = dt * ty2;
dttz1 = dt * tz1;
dttz2 = dt * tz2;
c2dttx1 = 2.0 * dttx1;
c2dtty1 = 2.0 * dtty1;
c2dttz1 = 2.0 * dttz1;
dtdssp = dt * dssp;
comz1 = dtdssp;
comz4 = 4.0 * dtdssp;
comz5 = 5.0 * dtdssp;
comz6 = 6.0 * dtdssp;
c3c4tx3 = c3c4 * tx3;
c3c4ty3 = c3c4 * ty3;
c3c4tz3 = c3c4 * tz3;
dx1tx1 = dx1 * tx1;
dx2tx1 = dx2 * tx1;
dx3tx1 = dx3 * tx1;
dx4tx1 = dx4 * tx1;
dx5tx1 = dx5 * tx1;
dy1ty1 = dy1 * ty1;
dy2ty1 = dy2 * ty1;
dy3ty1 = dy3 * ty1;
dy4ty1 = dy4 * ty1;
dy5ty1 = dy5 * ty1;
dz1tz1 = dz1 * tz1;
dz2tz1 = dz2 * tz1;
dz3tz1 = dz3 * tz1;
dz4tz1 = dz4 * tz1;
dz5tz1 = dz5 * tz1;
c2iv = 2.5;
con43 = 4.0 / 3.0;
con16 = 1.0 / 6.0;
xxcon1 = c3c4tx3 * con43 * tx3;
xxcon2 = c3c4tx3 * tx3;
xxcon3 = c3c4tx3 * conz1 * tx3;
xxcon4 = c3c4tx3 * con16 * tx3;
xxcon5 = c3c4tx3 * c1c5 * tx3;
yycon1 = c3c4ty3 * con43 * ty3;
yycon2 = c3c4ty3 * ty3;
yycon3 = c3c4ty3 * conz1 * ty3;
yycon4 = c3c4ty3 * con16 * ty3;
yycon5 = c3c4ty3 * c1c5 * ty3;
zzcon1 = c3c4tz3 * con43 * tz3;
zzcon2 = c3c4tz3 * tz3;
zzcon3 = c3c4tz3 * conz1 * tz3;
zzcon4 = c3c4tz3 * con16 * tz3;
zzcon5 = c3c4tz3 * c1c5 * tz3;
}
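// Editorial note: computes bvec -= ablock * avec for one 5x5 block and
// 5-vector.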
static void matvec_sub(double ablock[5][5], double avec[5] , double bvec[5]) {
int i;
for (i = 0; i < 5; i++) {
bvec[i] = bvec[i] - ablock[i][0] * avec[0] - ablock[i][1] * avec[1] - ablock[i][2] * avec[2] - ablock[i][3] * avec[3] - ablock[i][4] * avec[4];
}
}
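// Editorial note: computes cblock -= ablock * bblock for 5x5 blocks.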
static void matmul_sub(double ablock[5][5], double bblock[5][5] , double cblock[5][5]) {
int j;
for (j = 0; j < 5; j++) {
cblock[0][j] = cblock[0][j] - ablock[0][0] * bblock[0][j] - ablock[0][1] * bblock[1][j] - ablock[0][2] * bblock[2][j] - ablock[0][3] * bblock[3][j] - ablock[0][4] * bblock[4][j];
cblock[1][j] = cblock[1][j] - ablock[1][0] * bblock[0][j] - ablock[1][1] * bblock[1][j] - ablock[1][2] * bblock[2][j] - ablock[1][3] * bblock[3][j] - ablock[1][4] * bblock[4][j];
cblock[2][j] = cblock[2][j] - ablock[2][0] * bblock[0][j] - ablock[2][1] * bblock[1][j] - ablock[2][2] * bblock[2][j] - ablock[2][3] * bblock[3][j] - ablock[2][4] * bblock[4][j];
cblock[3][j] = cblock[3][j] - ablock[3][0] * bblock[0][j] - ablock[3][1] * bblock[1][j] - ablock[3][2] * bblock[2][j] - ablock[3][3] * bblock[3][j] - ablock[3][4] * bblock[4][j];
cblock[4][j] = cblock[4][j] - ablock[4][0] * bblock[0][j] - ablock[4][1] * bblock[1][j] - ablock[4][2] * bblock[2][j] - ablock[4][3] * bblock[3][j] - ablock[4][4] * bblock[4][j];
}
}
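// Editorial note: in-place Gauss-Jordan elimination of the 5x5 block
// lhs with no pivot search (the blocks are assumed well-conditioned);
// the same row operations are applied to the coupled block c and the
// right-hand side r. Eliminated entries and the unit diagonal of lhs
// are implicit and never stored.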
static void binvcrhs(double lhs[5][5], double c[5][5] , double r[5]) {
double pivot;
double coeff;
pivot = 1.00 / lhs[0][0];
lhs[0][1] = lhs[0][1] * pivot;
lhs[0][2] = lhs[0][2] * pivot;
lhs[0][3] = lhs[0][3] * pivot;
lhs[0][4] = lhs[0][4] * pivot;
c[0][0] = c[0][0] * pivot;
c[0][1] = c[0][1] * pivot;
c[0][2] = c[0][2] * pivot;
c[0][3] = c[0][3] * pivot;
c[0][4] = c[0][4] * pivot;
r[0] = r[0] * pivot;
coeff = lhs[1][0];
lhs[1][1] = lhs[1][1] - coeff * lhs[0][1];
lhs[1][2] = lhs[1][2] - coeff * lhs[0][2];
lhs[1][3] = lhs[1][3] - coeff * lhs[0][3];
lhs[1][4] = lhs[1][4] - coeff * lhs[0][4];
c[1][0] = c[1][0] - coeff * c[0][0];
c[1][1] = c[1][1] - coeff * c[0][1];
c[1][2] = c[1][2] - coeff * c[0][2];
c[1][3] = c[1][3] - coeff * c[0][3];
c[1][4] = c[1][4] - coeff * c[0][4];
r[1] = r[1] - coeff * r[0];
coeff = lhs[2][0];
lhs[2][1] = lhs[2][1] - coeff * lhs[0][1];
lhs[2][2] = lhs[2][2] - coeff * lhs[0][2];
lhs[2][3] = lhs[2][3] - coeff * lhs[0][3];
lhs[2][4] = lhs[2][4] - coeff * lhs[0][4];
c[2][0] = c[2][0] - coeff * c[0][0];
c[2][1] = c[2][1] - coeff * c[0][1];
c[2][2] = c[2][2] - coeff * c[0][2];
c[2][3] = c[2][3] - coeff * c[0][3];
c[2][4] = c[2][4] - coeff * c[0][4];
r[2] = r[2] - coeff * r[0];
coeff = lhs[3][0];
lhs[3][1] = lhs[3][1] - coeff * lhs[0][1];
lhs[3][2] = lhs[3][2] - coeff * lhs[0][2];
lhs[3][3] = lhs[3][3] - coeff * lhs[0][3];
lhs[3][4] = lhs[3][4] - coeff * lhs[0][4];
c[3][0] = c[3][0] - coeff * c[0][0];
c[3][1] = c[3][1] - coeff * c[0][1];
c[3][2] = c[3][2] - coeff * c[0][2];
c[3][3] = c[3][3] - coeff * c[0][3];
c[3][4] = c[3][4] - coeff * c[0][4];
r[3] = r[3] - coeff * r[0];
coeff = lhs[4][0];
lhs[4][1] = lhs[4][1] - coeff * lhs[0][1];
lhs[4][2] = lhs[4][2] - coeff * lhs[0][2];
lhs[4][3] = lhs[4][3] - coeff * lhs[0][3];
lhs[4][4] = lhs[4][4] - coeff * lhs[0][4];
c[4][0] = c[4][0] - coeff * c[0][0];
c[4][1] = c[4][1] - coeff * c[0][1];
c[4][2] = c[4][2] - coeff * c[0][2];
c[4][3] = c[4][3] - coeff * c[0][3];
c[4][4] = c[4][4] - coeff * c[0][4];
r[4] = r[4] - coeff * r[0];
pivot = 1.00 / lhs[1][1];
lhs[1][2] = lhs[1][2] * pivot;
lhs[1][3] = lhs[1][3] * pivot;
lhs[1][4] = lhs[1][4] * pivot;
c[1][0] = c[1][0] * pivot;
c[1][1] = c[1][1] * pivot;
c[1][2] = c[1][2] * pivot;
c[1][3] = c[1][3] * pivot;
c[1][4] = c[1][4] * pivot;
r[1] = r[1] * pivot;
coeff = lhs[0][1];
lhs[0][2] = lhs[0][2] - coeff * lhs[1][2];
lhs[0][3] = lhs[0][3] - coeff * lhs[1][3];
lhs[0][4] = lhs[0][4] - coeff * lhs[1][4];
c[0][0] = c[0][0] - coeff * c[1][0];
c[0][1] = c[0][1] - coeff * c[1][1];
c[0][2] = c[0][2] - coeff * c[1][2];
c[0][3] = c[0][3] - coeff * c[1][3];
c[0][4] = c[0][4] - coeff * c[1][4];
r[0] = r[0] - coeff * r[1];
coeff = lhs[2][1];
lhs[2][2] = lhs[2][2] - coeff * lhs[1][2];
lhs[2][3] = lhs[2][3] - coeff * lhs[1][3];
lhs[2][4] = lhs[2][4] - coeff * lhs[1][4];
c[2][0] = c[2][0] - coeff * c[1][0];
c[2][1] = c[2][1] - coeff * c[1][1];
c[2][2] = c[2][2] - coeff * c[1][2];
c[2][3] = c[2][3] - coeff * c[1][3];
c[2][4] = c[2][4] - coeff * c[1][4];
r[2] = r[2] - coeff * r[1];
coeff = lhs[3][1];
lhs[3][2] = lhs[3][2] - coeff * lhs[1][2];
lhs[3][3] = lhs[3][3] - coeff * lhs[1][3];
lhs[3][4] = lhs[3][4] - coeff * lhs[1][4];
c[3][0] = c[3][0] - coeff * c[1][0];
c[3][1] = c[3][1] - coeff * c[1][1];
c[3][2] = c[3][2] - coeff * c[1][2];
c[3][3] = c[3][3] - coeff * c[1][3];
c[3][4] = c[3][4] - coeff * c[1][4];
r[3] = r[3] - coeff * r[1];
coeff = lhs[4][1];
lhs[4][2] = lhs[4][2] - coeff * lhs[1][2];
lhs[4][3] = lhs[4][3] - coeff * lhs[1][3];
lhs[4][4] = lhs[4][4] - coeff * lhs[1][4];
c[4][0] = c[4][0] - coeff * c[1][0];
c[4][1] = c[4][1] - coeff * c[1][1];
c[4][2] = c[4][2] - coeff * c[1][2];
c[4][3] = c[4][3] - coeff * c[1][3];
c[4][4] = c[4][4] - coeff * c[1][4];
r[4] = r[4] - coeff * r[1];
pivot = 1.00 / lhs[2][2];
lhs[2][3] = lhs[2][3] * pivot;
lhs[2][4] = lhs[2][4] * pivot;
c[2][0] = c[2][0] * pivot;
c[2][1] = c[2][1] * pivot;
c[2][2] = c[2][2] * pivot;
c[2][3] = c[2][3] * pivot;
c[2][4] = c[2][4] * pivot;
r[2] = r[2] * pivot;
coeff = lhs[0][2];
lhs[0][3] = lhs[0][3] - coeff * lhs[2][3];
lhs[0][4] = lhs[0][4] - coeff * lhs[2][4];
c[0][0] = c[0][0] - coeff * c[2][0];
c[0][1] = c[0][1] - coeff * c[2][1];
c[0][2] = c[0][2] - coeff * c[2][2];
c[0][3] = c[0][3] - coeff * c[2][3];
c[0][4] = c[0][4] - coeff * c[2][4];
r[0] = r[0] - coeff * r[2];
coeff = lhs[1][2];
lhs[1][3] = lhs[1][3] - coeff * lhs[2][3];
lhs[1][4] = lhs[1][4] - coeff * lhs[2][4];
c[1][0] = c[1][0] - coeff * c[2][0];
c[1][1] = c[1][1] - coeff * c[2][1];
c[1][2] = c[1][2] - coeff * c[2][2];
c[1][3] = c[1][3] - coeff * c[2][3];
c[1][4] = c[1][4] - coeff * c[2][4];
r[1] = r[1] - coeff * r[2];
coeff = lhs[3][2];
lhs[3][3] = lhs[3][3] - coeff * lhs[2][3];
lhs[3][4] = lhs[3][4] - coeff * lhs[2][4];
c[3][0] = c[3][0] - coeff * c[2][0];
c[3][1] = c[3][1] - coeff * c[2][1];
c[3][2] = c[3][2] - coeff * c[2][2];
c[3][3] = c[3][3] - coeff * c[2][3];
c[3][4] = c[3][4] - coeff * c[2][4];
r[3] = r[3] - coeff * r[2];
coeff = lhs[4][2];
lhs[4][3] = lhs[4][3] - coeff * lhs[2][3];
lhs[4][4] = lhs[4][4] - coeff * lhs[2][4];
c[4][0] = c[4][0] - coeff * c[2][0];
c[4][1] = c[4][1] - coeff * c[2][1];
c[4][2] = c[4][2] - coeff * c[2][2];
c[4][3] = c[4][3] - coeff * c[2][3];
c[4][4] = c[4][4] - coeff * c[2][4];
r[4] = r[4] - coeff * r[2];
pivot = 1.00 / lhs[3][3];
lhs[3][4] = lhs[3][4] * pivot;
c[3][0] = c[3][0] * pivot;
c[3][1] = c[3][1] * pivot;
c[3][2] = c[3][2] * pivot;
c[3][3] = c[3][3] * pivot;
c[3][4] = c[3][4] * pivot;
r[3] = r[3] * pivot;
coeff = lhs[0][3];
lhs[0][4] = lhs[0][4] - coeff * lhs[3][4];
c[0][0] = c[0][0] - coeff * c[3][0];
c[0][1] = c[0][1] - coeff * c[3][1];
c[0][2] = c[0][2] - coeff * c[3][2];
c[0][3] = c[0][3] - coeff * c[3][3];
c[0][4] = c[0][4] - coeff * c[3][4];
r[0] = r[0] - coeff * r[3];
coeff = lhs[1][3];
lhs[1][4] = lhs[1][4] - coeff * lhs[3][4];
c[1][0] = c[1][0] - coeff * c[3][0];
c[1][1] = c[1][1] - coeff * c[3][1];
c[1][2] = c[1][2] - coeff * c[3][2];
c[1][3] = c[1][3] - coeff * c[3][3];
c[1][4] = c[1][4] - coeff * c[3][4];
r[1] = r[1] - coeff * r[3];
coeff = lhs[2][3];
lhs[2][4] = lhs[2][4] - coeff * lhs[3][4];
c[2][0] = c[2][0] - coeff * c[3][0];
c[2][1] = c[2][1] - coeff * c[3][1];
c[2][2] = c[2][2] - coeff * c[3][2];
c[2][3] = c[2][3] - coeff * c[3][3];
c[2][4] = c[2][4] - coeff * c[3][4];
r[2] = r[2] - coeff * r[3];
coeff = lhs[4][3];
lhs[4][4] = lhs[4][4] - coeff * lhs[3][4];
c[4][0] = c[4][0] - coeff * c[3][0];
c[4][1] = c[4][1] - coeff * c[3][1];
c[4][2] = c[4][2] - coeff * c[3][2];
c[4][3] = c[4][3] - coeff * c[3][3];
c[4][4] = c[4][4] - coeff * c[3][4];
r[4] = r[4] - coeff * r[3];
pivot = 1.00 / lhs[4][4];
c[4][0] = c[4][0] * pivot;
c[4][1] = c[4][1] * pivot;
c[4][2] = c[4][2] * pivot;
c[4][3] = c[4][3] * pivot;
c[4][4] = c[4][4] * pivot;
r[4] = r[4] * pivot;
coeff = lhs[0][4];
c[0][0] = c[0][0] - coeff * c[4][0];
c[0][1] = c[0][1] - coeff * c[4][1];
c[0][2] = c[0][2] - coeff * c[4][2];
c[0][3] = c[0][3] - coeff * c[4][3];
c[0][4] = c[0][4] - coeff * c[4][4];
r[0] = r[0] - coeff * r[4];
coeff = lhs[1][4];
c[1][0] = c[1][0] - coeff * c[4][0];
c[1][1] = c[1][1] - coeff * c[4][1];
c[1][2] = c[1][2] - coeff * c[4][2];
c[1][3] = c[1][3] - coeff * c[4][3];
c[1][4] = c[1][4] - coeff * c[4][4];
r[1] = r[1] - coeff * r[4];
coeff = lhs[2][4];
c[2][0] = c[2][0] - coeff * c[4][0];
c[2][1] = c[2][1] - coeff * c[4][1];
c[2][2] = c[2][2] - coeff * c[4][2];
c[2][3] = c[2][3] - coeff * c[4][3];
c[2][4] = c[2][4] - coeff * c[4][4];
r[2] = r[2] - coeff * r[4];
coeff = lhs[3][4];
c[3][0] = c[3][0] - coeff * c[4][0];
c[3][1] = c[3][1] - coeff * c[4][1];
c[3][2] = c[3][2] - coeff * c[4][2];
c[3][3] = c[3][3] - coeff * c[4][3];
c[3][4] = c[3][4] - coeff * c[4][4];
r[3] = r[3] - coeff * r[4];
}
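// Editorial note: identical elimination to binvcrhs, but with only the
// right-hand side r coupled (no trailing block c).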
static void binvrhs(double lhs[5][5], double r[5]) {
double pivot;
double coeff;
pivot = 1.00 / lhs[0][0];
lhs[0][1] = lhs[0][1] * pivot;
lhs[0][2] = lhs[0][2] * pivot;
lhs[0][3] = lhs[0][3] * pivot;
lhs[0][4] = lhs[0][4] * pivot;
r[0] = r[0] * pivot;
coeff = lhs[1][0];
lhs[1][1] = lhs[1][1] - coeff * lhs[0][1];
lhs[1][2] = lhs[1][2] - coeff * lhs[0][2];
lhs[1][3] = lhs[1][3] - coeff * lhs[0][3];
lhs[1][4] = lhs[1][4] - coeff * lhs[0][4];
r[1] = r[1] - coeff * r[0];
coeff = lhs[2][0];
lhs[2][1] = lhs[2][1] - coeff * lhs[0][1];
lhs[2][2] = lhs[2][2] - coeff * lhs[0][2];
lhs[2][3] = lhs[2][3] - coeff * lhs[0][3];
lhs[2][4] = lhs[2][4] - coeff * lhs[0][4];
r[2] = r[2] - coeff * r[0];
coeff = lhs[3][0];
lhs[3][1] = lhs[3][1] - coeff * lhs[0][1];
lhs[3][2] = lhs[3][2] - coeff * lhs[0][2];
lhs[3][3] = lhs[3][3] - coeff * lhs[0][3];
lhs[3][4] = lhs[3][4] - coeff * lhs[0][4];
r[3] = r[3] - coeff * r[0];
coeff = lhs[4][0];
lhs[4][1] = lhs[4][1] - coeff * lhs[0][1];
lhs[4][2] = lhs[4][2] - coeff * lhs[0][2];
lhs[4][3] = lhs[4][3] - coeff * lhs[0][3];
lhs[4][4] = lhs[4][4] - coeff * lhs[0][4];
r[4] = r[4] - coeff * r[0];
pivot = 1.00 / lhs[1][1];
lhs[1][2] = lhs[1][2] * pivot;
lhs[1][3] = lhs[1][3] * pivot;
lhs[1][4] = lhs[1][4] * pivot;
r[1] = r[1] * pivot;
coeff = lhs[0][1];
lhs[0][2] = lhs[0][2] - coeff * lhs[1][2];
lhs[0][3] = lhs[0][3] - coeff * lhs[1][3];
lhs[0][4] = lhs[0][4] - coeff * lhs[1][4];
r[0] = r[0] - coeff * r[1];
coeff = lhs[2][1];
lhs[2][2] = lhs[2][2] - coeff * lhs[1][2];
lhs[2][3] = lhs[2][3] - coeff * lhs[1][3];
lhs[2][4] = lhs[2][4] - coeff * lhs[1][4];
r[2] = r[2] - coeff * r[1];
coeff = lhs[3][1];
lhs[3][2] = lhs[3][2] - coeff * lhs[1][2];
lhs[3][3] = lhs[3][3] - coeff * lhs[1][3];
lhs[3][4] = lhs[3][4] - coeff * lhs[1][4];
r[3] = r[3] - coeff * r[1];
coeff = lhs[4][1];
lhs[4][2] = lhs[4][2] - coeff * lhs[1][2];
lhs[4][3] = lhs[4][3] - coeff * lhs[1][3];
lhs[4][4] = lhs[4][4] - coeff * lhs[1][4];
r[4] = r[4] - coeff * r[1];
pivot = 1.00 / lhs[2][2];
lhs[2][3] = lhs[2][3] * pivot;
lhs[2][4] = lhs[2][4] * pivot;
r[2] = r[2] * pivot;
coeff = lhs[0][2];
lhs[0][3] = lhs[0][3] - coeff * lhs[2][3];
lhs[0][4] = lhs[0][4] - coeff * lhs[2][4];
r[0] = r[0] - coeff * r[2];
coeff = lhs[1][2];
lhs[1][3] = lhs[1][3] - coeff * lhs[2][3];
lhs[1][4] = lhs[1][4] - coeff * lhs[2][4];
r[1] = r[1] - coeff * r[2];
coeff = lhs[3][2];
lhs[3][3] = lhs[3][3] - coeff * lhs[2][3];
lhs[3][4] = lhs[3][4] - coeff * lhs[2][4];
r[3] = r[3] - coeff * r[2];
coeff = lhs[4][2];
lhs[4][3] = lhs[4][3] - coeff * lhs[2][3];
lhs[4][4] = lhs[4][4] - coeff * lhs[2][4];
r[4] = r[4] - coeff * r[2];
pivot = 1.00 / lhs[3][3];
lhs[3][4] = lhs[3][4] * pivot;
r[3] = r[3] * pivot;
coeff = lhs[0][3];
lhs[0][4] = lhs[0][4] - coeff * lhs[3][4];
r[0] = r[0] - coeff * r[3];
coeff = lhs[1][3];
lhs[1][4] = lhs[1][4] - coeff * lhs[3][4];
r[1] = r[1] - coeff * r[3];
coeff = lhs[2][3];
lhs[2][4] = lhs[2][4] - coeff * lhs[3][4];
r[2] = r[2] - coeff * r[3];
coeff = lhs[4][3];
lhs[4][4] = lhs[4][4] - coeff * lhs[3][4];
r[4] = r[4] - coeff * r[3];
pivot = 1.00 / lhs[4][4];
r[4] = r[4] * pivot;
coeff = lhs[0][4];
r[0] = r[0] - coeff * r[4];
coeff = lhs[1][4];
r[1] = r[1] - coeff * r[4];
coeff = lhs[2][4];
r[2] = r[2] - coeff * r[4];
coeff = lhs[3][4];
r[3] = r[3] - coeff * r[4];
}
|
GB_binop__land_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__land_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__land_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__land_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int32)
// A*D function (colscale): GB (_AxD__land_int32)
// D*A function (rowscale): GB (_DxB__land_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__land_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__land_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int32)
// C=scalar+B GB (_bind1st__land_int32)
// C=scalar+B' GB (_bind1st_tran__land_int32)
// C=A+scalar GB (_bind2nd__land_int32)
// C=A'+scalar GB (_bind2nd_tran__land_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
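// Editorial example (not part of the generated file): the operator
// treats any nonzero value as true, so GB_BINOP (z, 3, -2, i, j)
// yields z = 1 and GB_BINOP (z, 3, 0, i, j) yields z = 0.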
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT32 || GxB_NO_LAND_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__land_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__land_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__land_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__land_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__land_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__land_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__land_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__land_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__land_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__land_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__land_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
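// Editorial note (assuming the standard SuiteSparse macros): GBB (Bb, p)
// is true when entry p is present (always true if Bb is NULL), and
// GBX (Bx, p, iso) reads Bx [p], or Bx [0] for an iso-valued matrix.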
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__land_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__land_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__land_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sub_copy.c | // pmlib C++ test program based on stream.c by John McCalpin
# include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// #ifdef __cplusplus
// extern "C" int omp_get_num_threads();
// #else
// extern int omp_get_num_threads();
// #endif
# define FLT_MAX 1.0E+6 /* stand-in for the <float.h> constant; only used to seed mintime[] */
// # define N 10000000
# define N 50000000
// if N >= 100M, the mcmodel compile option will be needed
# define NTIMES 10
# define OFFSET 0
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
static double a[N+OFFSET], b[N+OFFSET], c[N+OFFSET];
static double avgtime[4] = {0,0,0,0}, maxtime[4] = {0,0,0,0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(double) * (double)N,
2 * sizeof(double) * (double)N,
3 * sizeof(double) * (double)N,
3 * sizeof(double) * (double)N
};
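/* Editorial note: per-trial traffic in bytes -- copy and scale move two
   doubles per array element, add and triad move three. Only the Copy
   and Triad rows are exercised in this file, and rates are reported
   from the fastest (min-time) trial, as in stream.c. */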
extern double mysecond();
void stream_copy()
{
int quantum, checktick();
int BytesPerWord;
register int j, k;
double scalar, times[4][NTIMES];
k = 0;
#ifdef _OPENMP
k = omp_get_max_threads();
#endif
printf("Modified STREAM COPY, num_threads=%d, array size= %d\n", k, N);
#pragma omp parallel for
for (j=0; j<N; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
times[0][k] = mysecond();
#pragma omp parallel for
for (j=0; j<N; j++)
c[j] = a[j];
times[0][k] = mysecond() - times[0][k];
}
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
j=0;
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
printf("Function Rate (MB/s) Avg time Min time Max time\n");
{
j=0;
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
printf("%s%11.4f %11.4f %11.4f %11.4f\n", label[j],
1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]);
}
}
void stream_triad()
{
int quantum, checktick();
int BytesPerWord;
register int j, k;
double scalar, times[4][NTIMES];
k = 0;
#ifdef _OPENMP
k = omp_get_max_threads();
#endif
printf("Modified STREAM TRIAD, num_threads=%d, array size= %d\n", k, N);
#pragma omp parallel for
for (j=0; j<N; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
times[3][k] = mysecond();
#pragma omp parallel for
for (j=0; j<N; j++)
a[j] = b[j]+scalar*c[j];
times[3][k] = mysecond() - times[3][k];
}
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
j=3;
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
printf("Function Rate (MB/s) Avg time Min time Max time\n");
{
j=3;
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
printf("%s%11.4f %11.4f %11.4f %11.4f\n", label[j],
1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]);
}
}
# define M 20
int
checktick()
{
int i, minDelta, Delta;
double t1, t2, timesfound[M];
/* Collect a sequence of M unique time values from the system. */
for (i = 0; i < M; i++) {
t1 = mysecond();
while( ((t2=mysecond()) - t1) < 1.0E-6 )
;
timesfound[i] = t1 = t2;
}
/*
* Determine the minimum difference between these M values.
* This result will be our estimate (in microseconds) for the
* clock granularity.
*/
minDelta = 1000000;
for (i = 1; i < M; i++) {
Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
minDelta = MIN(minDelta, MAX(Delta,0));
}
return(minDelta);
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#ifdef __GNUC__
#define __USE_BSD 1
#endif
#include <sys/time.h>
double mysecond()
{
struct timeval tp;
// struct timezone tzp;
int i;
// i = gettimeofday(&tp,&tzp);
i = gettimeofday(&tp, NULL);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
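/* Editorial sketch (not in the original file): a minimal standalone
   driver for these kernels, assuming no pmlib harness supplies one.
   Guarded by a hypothetical STREAM_STANDALONE macro so the default
   build is unchanged. */
#ifdef STREAM_STANDALONE
int main(void)
{
    stream_copy();
    stream_triad();
    return 0;
}
#endif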
|
GB_unaryop__ainv_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_int8
// op(A') function: GB_tran__ainv_uint8_int8
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
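// For reference, a hand expansion of GB_CAST_OP (pC, pA) for this operator
// (a sketch; the actual code is produced by the preprocessor):
//
//      int8_t  aij = Ax [pA] ;         // GB_GETA
//      uint8_t z   = (uint8_t) aij ;   // GB_CASTING
//      Cx [pC] = -z ;                  // GB_OP applied to GB_CX (pC)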
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint8_int8
(
uint8_t *Cx, // Cx and Ax may be aliased
int8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ordering_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file ordering_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
#include <mxnet/operator_util.h>
#include <dmlc/optional.h>
#include <mshadow/tensor.h>
#include <algorithm>
#include <vector>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "./sort_op.h"
#include "./indexing_op.h"
namespace mshadow {
template<typename xpu, int src_dim, typename DType, int dst_dim>
inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src,
Shape<dst_dim> target_shape) {
CHECK_EQ(src.CheckContiguous(), true);
return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_);
}
}  // namespace mshadow
namespace mxnet {
namespace op {
// These enums are only visible within this header
namespace topk_enum {
enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth};
} // topk_enum
struct TopKParam : public dmlc::Parameter<TopKParam> {
dmlc::optional<int> axis;
int k;
int ret_typ;
bool is_ascend;
DMLC_DECLARE_PARAMETER(TopKParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose the top k indices."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(k).set_default(1)
.describe("Number of top elements to select;"
" must always be smaller than or equal to the number of elements"
" along the given axis. A global sort is performed if k < 1.");
DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices)
.add_enum("value", topk_enum::kReturnValue)
.add_enum("indices", topk_enum::kReturnIndices)
.add_enum("mask", topk_enum::kReturnMask)
.add_enum("both", topk_enum::kReturnBoth)
.describe("The return type.\n"
" \"value\" means to return the top k values,"
" \"indices\" means to return the indices of the top k values,"
" \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values."
" \"both\" means to return a list of both values and indices of top k elements.");
DMLC_DECLARE_FIELD(is_ascend).set_default(false)
.describe("Whether to choose the k largest or the k smallest elements."
" The k largest elements are chosen if set to false.");
}
};
struct SortParam : public dmlc::Parameter<SortParam> {
dmlc::optional<int> axis;
bool is_ascend;
DMLC_DECLARE_PARAMETER(SortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to choose sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
}
};
struct ArgSortParam : public dmlc::Parameter<ArgSortParam> {
dmlc::optional<int> axis;
bool is_ascend;
DMLC_DECLARE_PARAMETER(ArgSortParam) {
DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1))
.describe("Axis along which to sort the input tensor."
" If not given, the flattened array is used. Default is -1.");
DMLC_DECLARE_FIELD(is_ascend).set_default(true)
.describe("Whether to sort in ascending or descending order.");
}
};
inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape,
int *batch_size, int *element_num, int *axis, int *k,
bool *do_transpose, bool *is_ascend) {
*do_transpose = false;
*k = param.k;
*is_ascend = param.is_ascend;
// get batch_size, axis and element_num
if (!static_cast<bool>(param.axis)) { // No axis given
*axis = 0;
*batch_size = 1;
*element_num = src_shape.Size();
} else {
*axis = param.axis.value();
if (*axis < 0) {
*axis += src_shape.ndim();
}
CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim()))
<< "Invalid axis! axis should be in the range [0, "
<< src_shape.ndim() << "), found axis=" << *axis;
*batch_size = src_shape.Size() / src_shape[*axis];
*element_num = src_shape[*axis];
if (*axis != static_cast<int>(src_shape.ndim()) - 1) {
*do_transpose = true;
}
}
// get k
if (param.k <= 0) {
*k = *element_num;
}
// get target_shape
if (!static_cast<bool>(param.axis)) {
if (param.ret_typ != topk_enum::kReturnMask) {
*target_shape = mshadow::Shape1(*k);
} else {
*target_shape = src_shape;
}
} else {
*target_shape = src_shape;
if (param.ret_typ != topk_enum::kReturnMask) {
(*target_shape)[*axis] = *k;
}
}
CHECK(*k >= 1 && *k <= *element_num) << "k must be between 1 and "
<< *element_num << ", got k = " << *k;
}
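// Example (for illustration): with src_shape = (2, 3, 4), axis = 1, k = 2
// and ret_typ != kReturnMask, this yields batch_size = 8, element_num = 3,
// do_transpose = true (axis 1 is not the last axis) and
// target_shape = (2, 2, 4).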
using namespace mshadow;
template<typename xpu>
void TopKSort(const Tensor<xpu, 1, real_t>& dat,
const Tensor<xpu, 1, int>& ind,
const Tensor<xpu, 1, char>& work,
int K, int N, bool is_ascend,
Stream<xpu> *s);
template<>
MSHADOW_FORCE_INLINE void TopKSort<cpu>(const Tensor<cpu, 1, real_t>& dat,
const Tensor<cpu, 1, int>& ind,
const Tensor<cpu, 1, char>& work,
int K, int N, bool is_ascend,
Stream<cpu> *s) {
// Use full sort when K is relatively large.
const bool full_sort(K*8 > N);
// Batch size.
const int M(dat.size(0)/N);
const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount());
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < M; ++i) {
real_t *vals = dat.dptr_;
int *indices = ind.dptr_+i*N;
if (is_ascend) {
if (full_sort) {
std::sort(indices, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
} else {
std::partial_sort(indices, indices+K, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] < vals[i2]; });
}
} else {
if (full_sort) {
std::sort(indices, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
} else {
std::partial_sort(indices, indices+K, indices+N,
[&](const int& i1, const int& i2){ return vals[i1] > vals[i2]; });
}
}
real_t *buff = reinterpret_cast<real_t*>(work.dptr_)+i*K;
for (int j = 0; j < K; ++j) {
buff[j] = vals[indices[j]];
}
std::copy(buff, buff+K, &vals[i*N]);
}
}
#ifdef __CUDACC__
template<typename DType>
MSHADOW_XINLINE bool TopKCompare(DType val1, int ind1, DType val2, int ind2, bool is_ascend) {
// Negative indices denote undefined values, which are considered arbitrarily small or arbitrarily large, respectively.
return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) || (!is_ascend && val1 > val2)));
}
template<typename DType>
MSHADOW_XINLINE void MergeTopK(int K, DType *val1, int *ind1, DType *val2, int *ind2,
bool is_ascend) {
// In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals
// [0,..,i1], [0,..i2] of the two lists that will be part of the merged list.
int i1(K-1), i2(K-1);
for (int i = 0; i < K; ++i) {
if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) {
--i2;
} else {
--i1;
}
}
// Now merge the lists from back to front.
for (int i = K; i--;) {
if (i2 < 0 || (i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend))) {
val1[i] = val1[i1];
ind1[i] = ind1[i1];
--i1;
} else {
val1[i] = val2[i2];
ind1[i] = ind2[i2];
--i2;
}
}
}
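// Worked example (descending order, K = 3): val1 = {9, 7, 2},
// val2 = {8, 3, 1}, all indices valid. The first loop ends with
// i1 = 1, i2 = 0, i.e. {9, 7} survive from list 1 and {8} from list 2;
// the back-to-front merge then leaves val1 = {9, 8, 7}.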
template<typename DType>
__global__ void PartialSortSmallK(int K, int N, DType *val, int *ind, bool is_ascend) {
// Buffer for pairwise reduction.
extern __shared__ int buff[];
// Start of buffer sections associated with this thread.
const int offset(threadIdx.x*K);
int *ind_buff = &buff[offset];
DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset;
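// Layout: the dynamic shared memory holds blockDim.x*K indices (int)
// followed by blockDim.x*K values (DType); the launch in TopKSort<gpu>
// below sizes it as nthreads*K*(sizeof(int)+sizeof(real_t)) accordingly.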
// Initialize top-K values for this thread.
for (int i = 0; i < K; ++i) {
ind_buff[i] = -1;
}
// Range of values this thread cares about. Each thread block processes
// a different batch item (i.e. a different set of ind/val where we
// have to select the top-K elements). All threads within the same
// block work on the same batch item.
const int first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N);
// Select top-K from this range and store it sorted in the buffer.
// We assume a small K, so linear insertion is o.k.
for (int i = first; i < last; i += blockDim.x) {
DType cur_val(val[i]);
int cur_ind(ind[i]);
for (int j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend); ) {
if (j+1 < K) {
val_buff[j+1] = val_buff[j];
ind_buff[j+1] = ind_buff[j];
}
val_buff[j] = cur_val;
ind_buff[j] = cur_ind;
}
}
// Recursive merge of the sorted per-thread lists for this thread block. Note that blockDim.x is
// not necessarily a power of two, hence the additional checks against last_s.
for (unsigned int s = (blockDim.x+1)/2, last_s = blockDim.x;
last_s > 1; last_s = s, s = (s+1)/2) {
__syncthreads();
if (threadIdx.x < s && threadIdx.x+s < last_s) {
MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend);
}
}
// Final updates on master thread.
if (threadIdx.x == 0) {
for (int i = 0; i < K; ++i) {
ind[blockIdx.x*N+i] = ind_buff[i];
val[blockIdx.x*N+i] = val_buff[i];
}
}
}
template<>
MSHADOW_FORCE_INLINE void TopKSort<gpu>(const Tensor<gpu, 1, real_t>& dat,
const Tensor<gpu, 1, int>& ind,
const Tensor<gpu, 1, char>& work,
int K, int N, bool is_ascend,
Stream<gpu> *s) {
// Use full sort for all but very small K for which we
// can do a partial sort entirely within shared memory.
const bool full_sort(K > 5);
// Batch size.
const int M(dat.size(0)/N);
if (full_sort) {
// Divide workspace into two parts. The first one is needed to store batch ids.
const int id_size(sizeof(int)*ind.size(0));
Tensor<gpu, 1, int> batch_id(reinterpret_cast<int*>(work.dptr_), Shape1(ind.size(0)), s);
Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s);
mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work);
if (M > 1) {
// Back to back sorting. Note that mxnet::op::SortByKey is a stable sort.
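// Example (N = 2, M = 2): after the value sort, ind may be {2, 0, 3, 1};
// batch_id = ind / N = {1, 0, 1, 0}, and the two stable sorts by batch_id
// regroup dat and ind batch by batch while keeping the within-batch value
// ordering intact.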
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, dat, true, &sort_work);
batch_id = ind / N;
mxnet::op::SortByKey(batch_id, ind, true, &sort_work);
}
} else {
const int nthreads(mshadow::cuda::kBaseThreadNum);
PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(int)+sizeof(real_t)),
mshadow::Stream<gpu>::GetStream(s)>>>
(K, N, dat.dptr_, ind.dptr_, is_ascend);
}
}
#endif
/*!
 * \brief Implementation of the TopK operation
 *
 * \param ctx the running context
 * \param resource temporary resource handler
 * \param src the source blob
 * \param ret the destination blobs
 * \param param the topk parameters (including k, the number of elements to keep)
 * \tparam xpu the device type.
 */
template<typename xpu>
void TopKImpl(RunContext ctx,
Resource resource,
const TBlob& src,
const std::vector<TBlob>& ret,
const TopKParam& param) {
using namespace mshadow;
using namespace mshadow::expr;
for (auto ret_ele : ret) {
CHECK_EQ(ret_ele.type_flag_, src.type_flag_);
}
// 1. Parse and initialize information
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 1, char> workspace;
Tensor<xpu, 1, char> temp_workspace;
Tensor<xpu, 1, real_t> sorted_dat;
Tensor<xpu, 1, int> indices, sel_indices;
Tensor<xpu, 2, real_t> mask_val;
int batch_size, element_num; // number of batches and the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
int k = 0;
TShape target_shape;
ParseTopKParam(src.shape_, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
Tensor<xpu, 3, real_t> dat = src.FlatTo3D<xpu, real_t>(axis, axis, s);
size_t temp_size = 0;
// Temp space needed by the gpu-based full sorts.
temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, int, xpu>(src.Size()));
temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<int, real_t, xpu>(src.Size()));
temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<real_t, int, xpu>(src.Size()));
// Additional temp space for gpu full sorts for batch ids.
temp_size += sizeof(int) * src.Size();
// Temp space for cpu sorts.
temp_size = std::max(temp_size, sizeof(real_t) * src.Size());
size_t workspace_size = temp_size + sizeof(real_t) * src.Size() + sizeof(int) * src.Size();
if (param.ret_typ == topk_enum::kReturnMask) {
workspace_size += sizeof(int) * batch_size * k + sizeof(real_t) * batch_size * k;
}
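// Workspace layout, in allocation order: sorted_dat (real_t, src.Size()),
// indices (int, src.Size()), then, for the mask return type only,
// sel_indices and mask_val, and finally temp_size bytes of sort scratch space.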
workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
char* workspace_curr_ptr = workspace.dptr_;
sorted_dat = Tensor<xpu, 1, real_t>(reinterpret_cast<real_t*>(workspace_curr_ptr),
Shape1(src.Size()), s); // contain sorted dat
workspace_curr_ptr += sizeof(real_t) * src.Size();
indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
Shape1(src.Size()), s); // indices in the original matrix
workspace_curr_ptr += sizeof(int) * src.Size();
if (do_transpose) {
sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size()));
} else {
sorted_dat = reshape(dat, Shape1(src.Size()));
}
mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, 0, 1,
kWriteTo, indices.dptr_);
CHECK_EQ(sorted_dat.CheckContiguous(), true);
CHECK_EQ(indices.CheckContiguous(), true);
if (param.ret_typ == topk_enum::kReturnMask) {
sel_indices = Tensor<xpu, 1, int>(reinterpret_cast<int*>(workspace_curr_ptr),
Shape1(batch_size * k), s);
workspace_curr_ptr += sizeof(int) * batch_size * k;
mask_val = Tensor<xpu, 2, real_t>(reinterpret_cast<real_t*>(workspace_curr_ptr),
Shape2(batch_size * k, 1), s);
workspace_curr_ptr += sizeof(real_t) * batch_size * k;
mask_val = scalar<real_t>(1);
CHECK_EQ(sel_indices.CheckContiguous(), true);
CHECK_EQ(mask_val.CheckContiguous(), true);
}
temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s); // temp space
workspace_curr_ptr += temp_size;
// 2. Perform inplace batch sort.
// After sorting, each batch in `sorted_dat` will be sorted in the corresponding order
// up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat`
TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s);
// 3. Assign results to the ret blob
if (param.ret_typ == topk_enum::kReturnMask) {
Tensor<xpu, 2, real_t> ret_mask =
ret[0].get_with_shape<xpu, 2, real_t>(Shape2(ret[0].Size(), 1), s);
ret_mask = scalar<real_t>(0);
sel_indices = reshape(slice<1>(
inplace_reshape(indices,
Shape2(batch_size,
element_num)), 0, k),
Shape1(batch_size * k));
if (do_transpose) {
TShape src_shape = src.shape_.FlatTo3D(axis);
CHECK_EQ(sel_indices.CheckContiguous(), true);
sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
Shape3(0, 2, 1));
}
IndexFill(ret_mask, sel_indices, mask_val);
} else if (param.ret_typ == topk_enum::kReturnIndices) {
indices = F<mshadow_op::mod>(indices, element_num);
if (do_transpose) {
Tensor<xpu, 3, real_t> ret_indices = ret[0].FlatTo3D<xpu, real_t>(axis, axis, s);
ret_indices = tcast<real_t>(transpose(
slice<2>(inplace_reshape(indices,
Shape3(ret_indices.shape_[0],
ret_indices.shape_[2],
element_num)),
0, k),
Shape3(0, 2, 1)));
} else {
Tensor<xpu, 2, real_t> ret_indices =
ret[0].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
ret_indices = tcast<real_t>(slice<1>(
inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k));
}
} else {
indices = F<mshadow_op::mod>(indices, element_num);
if (do_transpose) {
Tensor<xpu, 3, real_t> ret_value = ret[0].FlatTo3D<xpu, real_t>(axis, axis, s);
Tensor<xpu, 3, real_t> ret_indices = ret[1].FlatTo3D<xpu, real_t>(axis, axis, s);
ret_value = transpose(
slice<2>(inplace_reshape(sorted_dat,
Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)),
0, k),
Shape3(0, 2, 1));
ret_indices = tcast<real_t>(transpose(
slice<2>(inplace_reshape(indices,
Shape3(ret_indices.shape_[0],
ret_indices.shape_[2],
element_num)),
0, k),
Shape3(0, 2, 1)));
} else {
Tensor<xpu, 2, real_t> ret_value =
ret[0].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
Tensor<xpu, 2, real_t> ret_indices =
ret[1].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
ret_value = slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k);
ret_indices = tcast<real_t>(slice<1>(
inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k));
}
}
}
template<typename xpu>
void TopK(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
// TODO(sxjscience) We can support inplace in the future
CHECK_EQ(req[0], kWriteTo) << "TopK does not support inplace";
TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, param);
}
template<typename xpu>
void Sort(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
CHECK_EQ(req[0], kWriteTo) << "Sort does not support inplace";
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnValue;
TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, topk_param);
}
template<typename xpu>
void ArgSort(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
CHECK_EQ(req[0], kWriteTo) << "ArgSort does not support inplace";
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnIndices;
TopKImpl<xpu>(ctx.run_ctx, ctx.requested[0], inputs[0], outputs, topk_param);
}
template<typename xpu>
void TopKBackward_(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_NE(req[0], kWriteInplace);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>();
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth);
int batch_size, element_num; // number of batches and the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
int k = 0;
TShape target_shape;
ParseTopKParam(outputs[0].shape_, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
Tensor<xpu, 1, real_t> workspace =
ctx.requested[0].get_space_typed<xpu, 1, real_t>(Shape1(batch_size * k * 2 + batch_size), s);
Tensor<xpu, 1, real_t> sel_indices =
Tensor<xpu, 1, real_t>(workspace.dptr_, Shape1(batch_size * k), s);
Tensor<xpu, 1, real_t> batch_shift =
Tensor<xpu, 1, real_t>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s);
Tensor<xpu, 1, real_t> dummy_index =
Tensor<xpu, 1, real_t>(workspace.dptr_ + batch_size * k + batch_size,
Shape1(batch_size * k), s);
Tensor<xpu, 2, real_t> out_grad =
inputs[0].get_with_shape<xpu, 2, real_t>(Shape2(inputs[0].shape_.Size(), 1), s);
Tensor<xpu, 2, real_t> in_grad =
outputs[0].get_with_shape<xpu, 2, real_t>(Shape2(outputs[0].shape_.Size(), 1), s);
mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, 0.0f,
static_cast<real_t>(element_num), kWriteTo, batch_shift.dptr_);
if (do_transpose) {
Tensor<xpu, 1, real_t> indices = inputs[2].FlatTo1D<xpu, real_t>(s);
TShape src_shape = outputs[0].shape_.FlatTo3D(axis);
sel_indices = reshape(transpose(
broadcast_to(inplace_reshape(batch_shift,
Shape3(src_shape[0], src_shape[2], 1)),
TShape(Shape3(src_shape[0], src_shape[2], k))),
Shape3(0, 2, 1)),
Shape1(batch_size * k));
sel_indices += indices;
sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]),
Shape3(0, 2, 1));
} else {
Tensor<xpu, 2, real_t> indices =
inputs[2].get_with_shape<xpu, 2, real_t>(Shape2(batch_size, k), s);
sel_indices = reshape(indices +
broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)),
TShape(Shape2(batch_size, k))),
Shape1(batch_size * k));
}
CHECK_EQ(sel_indices.CheckContiguous(), true);
if (kWriteTo == req[0]) {
in_grad = scalar<real_t>(0);
IndexFill(in_grad, sel_indices, out_grad);
} else if (kAddTo == req[0]) {
// TODO(sxjscience) We can use AddTakeGrad in the future.
// However, the current implementation of AddTakeGrad is not so efficient.
mxnet_op::Kernel<range_fwd, xpu>::Launch(s, sel_indices.shape_.Size(), 1, 0.0f,
1.0f, kWriteTo, dummy_index.dptr_);
mxnet::op::AddTakeGradLargeBatch(in_grad, sel_indices, dummy_index, out_grad);
} else if (kNullOp == req[0]) {
return;
} else {
LOG(FATAL) << "Not Implemented!";
}
}
inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnIndices ||
param.ret_typ == topk_enum::kReturnMask) {
return static_cast<uint32_t>(1);
} else {
return static_cast<uint32_t>(2);
}
}
inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
if (param.ret_typ == topk_enum::kReturnBoth) {
return static_cast<uint32_t>(2);
} else {
return static_cast<uint32_t>(1);
}
}
inline bool TopKType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
return ElemwiseAttr<int, type_is_none, type_assign, true, type_string>(
attrs, in_attrs, out_attrs, -1);
}
inline bool TopKShapeImpl(const TopKParam& param,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
if (param.ret_typ == topk_enum::kReturnIndices ||
param.ret_typ == topk_enum::kReturnMask) {
CHECK_EQ(out_attrs->size(), 1U);
} else {
CHECK_EQ(out_attrs->size(), 2U);
}
TShape& in_shape = (*in_attrs)[0];
int batch_size, element_num; // number of batches and the size of each batch
int axis = 0;
bool do_transpose = false;
bool is_ascend = false;
int k = 0;
TShape target_shape;
ParseTopKParam(in_shape, param,
&target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend);
if (param.ret_typ == topk_enum::kReturnIndices ||
param.ret_typ == topk_enum::kReturnMask) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
} else {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape);
SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape);
}
return true;
}
inline bool TopKShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed);
return TopKShapeImpl(param, in_attrs, out_attrs);
}
inline bool SortShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
const SortParam& param = nnvm::get<SortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnValue;
return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
inline bool ArgSortShape(const nnvm::NodeAttrs& attrs,
std::vector<TShape> *in_attrs,
std::vector<TShape> *out_attrs) {
const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed);
TopKParam topk_param;
topk_param.axis = param.axis;
topk_param.is_ascend = param.is_ascend;
topk_param.k = 0;
topk_param.ret_typ = topk_enum::kReturnIndices;
return TopKShapeImpl(topk_param, in_attrs, out_attrs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
|
main.c | #include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N 1000
#define Eps 1e-7
#pragma omp declare target
void func_1v(float*, float*, unsigned);
void func_2v(float*, float*, unsigned);
void func_3v(float*, float*, unsigned);
#pragma omp end declare target
void hfunc0(float*, float*, unsigned);
void hfunc1(float*, float*, unsigned);
void hfunc2(float*, float*, unsigned);
void hfunc3(float*, float*, unsigned);
int main(){
float a[N], t1[N], t2[N], s = 0;
unsigned i;
unsigned nErr = 0;
srand((unsigned int)time(NULL));
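/* Note: rand() is not required to be thread-safe, so calling it from the
   parallel loop below races on the shared RNG state; a safer sketch would
   use rand_r() with a private seed per thread. */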
#pragma omp parallel for
for(i=0; i<N; ++i){
a[i]=rand()%100;
}
func_1v(a,t1,N);
func_3v(a,t2,N);
#pragma omp parallel for reduction(+:s)
for(i=0; i<N; ++i) s += t1[i];
if(s < Eps){
printf("Check 0: All elemets are zeros!\n");
return -1;
}
for(i=0; i<N; ++i){
if(fabs(t1[i]-t2[i]) >= Eps){
++nErr;
printf("Check 1: error at %d: %e >= %e\n",i,fabs(t1[i]-t2[i]),Eps);
}
}
func_2v(t1,t2,N);
for(i=0; i<N; ++i){
if(fabs(a[i]-t2[i]) >= Eps){
++nErr;
printf("Check 2: error at %d: %e >= %e\n",i,fabs(a[i]-t2[i]),Eps);
}
}
hfunc0(a, t1, N);
hfunc1(a, t1, N);
hfunc3(a, t2, N);
hfunc2(t1, t2, N);
if(!nErr) printf("Success\n");
return nErr;
}
|
full_matrix.h | /*
Copyright (c) 2020, VSB - Technical University of Ostrava and Graz University of
Technology
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the names of VSB - Technical University of Ostrava and Graz
University of Technology nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL VSB - TECHNICAL UNIVERSITY OF OSTRAVA AND
GRAZ UNIVERSITY OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** @file full_matrix.h
 * @brief Class representing a full matrix.
 */
#ifndef INCLUDE_BESTHEA_FULL_MATRIX_H_
#define INCLUDE_BESTHEA_FULL_MATRIX_H_
#include "besthea/matrix.h"
#include "besthea/settings.h"
#include <iostream>
#include <mkl.h>
#include <vector>
namespace besthea {
namespace linear_algebra {
class full_matrix;
}
}
/**
* Class representing a full matrix.
*/
class besthea::linear_algebra::full_matrix
: public besthea::linear_algebra::matrix {
public:
using vector_type = besthea::linear_algebra::vector; //!< Vector type.
/**
* Default constructor.
*/
full_matrix( );
/**
* Copy constructor.
* @param[in] that Matrix to be deep copied.
*/
full_matrix( const full_matrix & that );
/**
* Constructor with an initializer list.
* @param[in] n_rows Number of rows.
* @param[in] n_columns Number of columns.
* @param[in] list Initializer list for std::vector.
*/
full_matrix( lo n_rows, lo n_columns, std::initializer_list< sc > list );
/**
* Constructor creating a matrix of the given dimensions.
* @param[in] n_rows Number of rows.
* @param[in] n_columns Number of columns.
* @param[in] zero Initialize to 0 if true.
*/
full_matrix( lo n_rows, lo n_columns, bool zero = true );
/**
* Destructor.
*/
virtual ~full_matrix( );
/*!
* @brief Prints the matrix.
* @param[in] stream
*/
void print( std::ostream & stream = std::cout ) const;
/*!
* @brief Fills the matrix with the given value.
* @param[in] value
*/
void fill( sc value ) {
std::fill( _data.begin( ), _data.end( ), value );
}
/*!
* @brief Fills the diagonal of the matrix with the given value.
* @param[in] value
*/
void fill_diag( sc value );
/*!
* @brief Fills the matrix with random numbers (uniform distribution).
* @param[in] lower Lower bound.
* @param[in] upper Upper bound.
*/
void random_fill( sc lower, sc upper );
/*!
* @brief Fills the matrix diagonal with random numbers (uniform
* distribution).
* @param[in] lower Lower bound.
* @param[in] upper Upper bound.
*/
void random_fill_diag( sc lower, sc upper );
/*!
* @brief Returns the (i,j)-th element of the matrix.
* @param[in] i Row index.
* @param[in] j Column index.
*/
sc get( lo i, lo j ) const {
return _data[ i + j * _n_rows ];
}
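// Element (i,j) lives at _data[ i + j * _n_rows ]: storage is column-major,
// the convention expected by the LAPACK-style MKL routines used for the
// factorizations below.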
/*!
* @brief Sets the (i,j)-th element of the matrix.
* @param[in] i Row index.
* @param[in] j Column index.
* @param[in] value Value to be set.
*/
void set( lo i, lo j, sc value ) {
_data[ i + j * _n_rows ] = value;
}
/*!
* @brief Adds value to the (i,j)-th element of the matrix.
* @param[in] i Row index.
* @param[in] j Column index.
* @param[in] value Value to be set.
*/
void add( lo i, lo j, sc value ) {
_data[ i + j * _n_rows ] += value;
}
/*!
* @brief Atomically adds value to the (i,j)-th element of the matrix.
* @param[in] i Row index.
* @param[in] j Column index.
* @param[in] value Value to be set.
*/
void add_atomic( lo i, lo j, sc value ) {
#pragma omp atomic update
_data[ i + j * _n_rows ] += value;
}
/*!
* @brief Overloads the () operator.
* @param[in] i Row index.
* @param[in] j Column index.
*/
sc & operator( )( lo i, lo j ) {
return _data[ i + j * _n_rows ];
}
/*!
* @brief Overloads the () operator.
* @param[in] i Row index.
* @param[in] j Column index.
*/
sc operator( )( lo i, lo j ) const {
return _data[ i + j * _n_rows ];
}
/*!
* @brief Returns the raw data.
*/
sc * data( ) {
return _data.data( );
}
/*!
* @brief Returns the raw data.
*/
const sc * data( ) const {
return _data.data( );
}
/*!
* @brief y = beta * y + alpha * (this)^trans * x.
* @param[in] x
* @param[in,out] y
* @param[in] trans Flag for transpose.
* @param[in] alpha
* @param[in] beta
*/
virtual void apply( const vector_type & x, vector_type & y,
bool trans = false, sc alpha = 1.0, sc beta = 0.0 ) const;
/*!
* @brief y = beta * y + alpha * this * x.
* @param[in] x
* @param[in,out] y
* @param[in] alpha
* @param[in] beta
*/
void apply_symmetric(
vector_type const & x, vector_type & y, sc alpha = 1.0, sc beta = 0.0 ) const;
/*!
* @brief C = alpha * A * B + beta * C, where C is this matrix
* @param[in] A
* @param[in] B
* @param[in] trans_A
* @param[in] trans_B
* @param[in] alpha
* @param[in] beta
*/
void multiply( full_matrix const & A, full_matrix const & B,
bool trans_A = false, bool trans_B = false, sc alpha = 1.0, sc beta = 0.0 );
/*!
* @brief In-place LU decomposition and solution.
* @param[in,out] rhs Right-hand side overwritten by the result.
* @param[in] n_rhs Number of right-hand sides.
* @param[in] trans Flag for transpose.
*/
void lu_decompose_solve(
vector_type & rhs, lo n_rhs = 1, bool trans = false );
/*!
* @brief In-place Cholesky decomposition and solution.
* @param[in,out] rhs Right-hand side overwritten by the result.
* @param[in] n_rhs Number of right-hand sides.
*/
void cholesky_decompose_solve( vector_type & rhs, lo n_rhs = 1 );
/*!
* @brief In-place Cholesky decomposition.
*/
void cholesky_decompose( );
/*!
* @brief Cholesky solution
* @param[in,out] rhs Right-hand side overwritten by the result.
* @param[in] n_rhs Number of right-hand sides.
*/
void cholesky_solve( vector_type & rhs, lo n_rhs = 1 );
/*!
* Resizes the matrix.
* @param[in] n_rows Number of rows.
* @param[in] n_columns Number of columns.
*/
void resize( lo n_rows, lo n_columns ) {
_data.resize( n_rows * n_columns );
_data.shrink_to_fit( );
_n_rows = n_rows;
_n_columns = n_columns;
}
protected:
std::vector< sc, besthea::allocator_type< sc > > _data; //!< Raw data.
};
#endif /* INCLUDE_BESTHEA_FULL_MATRIX_H_ */
|
momentum_diff_diss.c | /*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/
/*
The momentum diffusion acceleration is computed here (apart from the diffusion coefficients).
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <geos95.h>
#include "../game_types.h"
#include "../game_constants.h"
#include "spatial_operators.h"
#include "../subgrid_scale/subgrid_scale.h"
#include "../thermodynamics/thermodynamics.h"
int hor_calc_curl_of_vorticity(Curl_field, Vector_field, double [], Grid *, Dualgrid *);
int hori_momentum_diffusion(State *state, Diagnostics *diagnostics, Irreversible_quantities *irrev, Config *config, Grid *grid, Dualgrid *dualgrid)
{
/*
This is the horizontal momentum diffusion operator (horizontal diffusion of horizontal velocity).
*/
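// A sketch of the underlying identity: the vector Laplacian decomposes as
// Laplacian(v) = grad(div(v)) - curl(curl(v)), so the diffusive acceleration
// is assembled below from a divergence part and a (curl of) vorticity part.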
// calculating the divergence of the wind field
divv_h(state -> wind, diagnostics -> wind_divv, grid);
// calculating the relative vorticity of the wind field
calc_rel_vort(state -> wind, diagnostics, grid, dualgrid);
// calculating the effective horizontal kinematic viscosity acting on divergences (eddy viscosity)
hori_div_viscosity(state, irrev, grid, diagnostics, config);
// calculating the effective horizontal kinematic viscosity acting on vorticities on rhombi (eddy viscosity)
hori_curl_viscosity_rhombi(state, irrev, grid, diagnostics, config);
// calculating the effective horizontal kinematic viscosity acting on vorticities on triangles (eddy viscosity)
hori_curl_viscosity_triangles(state, irrev, grid, dualgrid, diagnostics, config);
/*
gradient of divergence component
*/
scalar_times_scalar(irrev -> viscosity_div, diagnostics -> wind_divv, diagnostics -> wind_divv);
grad_hor(diagnostics -> wind_divv, diagnostics -> vector_field_placeholder, grid);
/*
curl of vorticity component
*/
#pragma omp parallel for
for (int h_index = 0; h_index < NO_OF_VECTORS_H; ++h_index)
{
for (int layer_index = 0; layer_index < NO_OF_LAYERS; ++layer_index)
{
// multiplying the diffusion coefficient by the relative vorticity
// (diagnostics -> rel_vort is reused here as scratch storage for the product)
diagnostics -> rel_vort[NO_OF_VECTORS_H + 2*layer_index*NO_OF_VECTORS_H + h_index]
= irrev -> viscosity_curl_rhombi[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index]
*diagnostics -> rel_vort[NO_OF_VECTORS_H + 2*layer_index*NO_OF_VECTORS_H + h_index];
}
}
#pragma omp parallel for
for (int i = 0; i < NO_OF_DUAL_V_VECTORS; ++i)
{
diagnostics -> rel_vort_on_triangles[i] = irrev -> viscosity_curl_triangles[i]*diagnostics -> rel_vort_on_triangles[i];
}
hor_calc_curl_of_vorticity(diagnostics -> rel_vort, diagnostics -> rel_vort_on_triangles, diagnostics -> curl_of_vorticity, grid, dualgrid);
// adding up the two components of the momentum diffusion acceleration and dividing by the density at the edge
int vector_index, scalar_index_from, scalar_index_to;
#pragma omp parallel for private(vector_index, scalar_index_from, scalar_index_to)
for (int h_index = 0; h_index < NO_OF_VECTORS_H; ++h_index)
{
for (int layer_index = 0; layer_index < NO_OF_LAYERS; ++layer_index)
{
vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index;
scalar_index_from = layer_index*NO_OF_SCALARS_H + grid -> from_index[h_index];
scalar_index_to = layer_index*NO_OF_SCALARS_H + grid -> to_index[h_index];
irrev -> friction_acc[vector_index] =
(diagnostics -> vector_field_placeholder[vector_index] - diagnostics -> curl_of_vorticity[vector_index])
/(0.5*(density_gas(state, scalar_index_from) + density_gas(state, scalar_index_to)));
}
}
return 0;
}
int vert_momentum_diffusion(State *state, Diagnostics *diagnostics, Irreversible_quantities *irrev, Grid *grid, Config *config, double delta_t)
{
/*
This is the vertical momentum diffusion. The horizontal diffusion has already been called at this point, so the new tendencies can simply be added.
*/
// 1.) vertical diffusion of horizontal velocity
// ---------------------------------------------
int layer_index, h_index, vector_index;
// calculating the vertical gradient of the horizontal velocity at half levels
#pragma omp parallel for private(layer_index, h_index, vector_index)
for (int i = NO_OF_VECTORS_H; i < NO_OF_H_VECTORS + NO_OF_VECTORS_H; ++i)
{
layer_index = i/NO_OF_VECTORS_H;
h_index = i - layer_index*NO_OF_VECTORS_H;
vector_index = NO_OF_SCALARS_H + h_index + (layer_index - 1)*NO_OF_VECTORS_PER_LAYER;
// at the surface
if (layer_index == NO_OF_LAYERS)
{
diagnostics -> dv_hdz[i] = state -> wind[vector_index]
/(grid -> z_vector[vector_index]
- 0.5*(grid -> z_vector[NO_OF_VECTORS - NO_OF_SCALARS_H + grid -> from_index[h_index]]
+ grid -> z_vector[NO_OF_VECTORS - NO_OF_SCALARS_H + grid -> to_index[h_index]]));
}
// inner layers
else if (layer_index >= 1)
{
diagnostics -> dv_hdz[i] = (state -> wind[vector_index]
- state -> wind[vector_index + NO_OF_VECTORS_PER_LAYER])
/(grid -> z_vector[vector_index]
- grid -> z_vector[vector_index + NO_OF_VECTORS_PER_LAYER]);
}
// the second derivative is assumed to vanish at the TOA, so the value for
// layer_index == 0 (which the branches above never set) is copied from the level below
if (layer_index == 1)
{
diagnostics -> dv_hdz[i - NO_OF_VECTORS_H] = diagnostics -> dv_hdz[i];
}
}
// calculating the respective diffusion coefficient
vert_hor_mom_viscosity(state, irrev, diagnostics, config, grid, delta_t);
// now, the second derivative needs to be taken
double z_upper, z_lower, delta_z;
#pragma omp parallel for private(layer_index, h_index, vector_index, z_upper, z_lower, delta_z)
for (int i = 0; i < NO_OF_H_VECTORS; ++i)
{
layer_index = i/NO_OF_VECTORS_H;
h_index = i - layer_index*NO_OF_VECTORS_H;
vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index;
z_upper = 0.5*(grid -> z_vector[layer_index*NO_OF_VECTORS_PER_LAYER + grid -> from_index[h_index]]
+ grid -> z_vector[layer_index*NO_OF_VECTORS_PER_LAYER + grid -> to_index[h_index]]);
z_lower = 0.5*(grid -> z_vector[(layer_index + 1)*NO_OF_VECTORS_PER_LAYER + grid -> from_index[h_index]]
+ grid -> z_vector[(layer_index + 1)*NO_OF_VECTORS_PER_LAYER + grid -> to_index[h_index]]);
delta_z = z_upper - z_lower;
irrev -> friction_acc[vector_index] +=
(irrev -> vert_hor_viscosity[i]*diagnostics -> dv_hdz[i]
- irrev -> vert_hor_viscosity[i + NO_OF_VECTORS_H]*diagnostics -> dv_hdz[i + NO_OF_VECTORS_H])/delta_z
/(0.5*(density_gas(state, layer_index*NO_OF_SCALARS_H + grid -> from_index[h_index]) + density_gas(state, layer_index*NO_OF_SCALARS_H + grid -> to_index[h_index])));
}
// 2.) vertical diffusion of vertical velocity
// -------------------------------------------
// resetting the placeholder field
#pragma omp parallel for
for (int i = 0; i < NO_OF_SCALARS; ++i)
{
diagnostics -> scalar_field_placeholder[i] = 0;
}
// computing something like dw/dz
add_vertical_divv(state -> wind, diagnostics -> scalar_field_placeholder, grid);
// computing and multiplying by the respective diffusion coefficient
vert_w_viscosity(state, grid, diagnostics, irrev, delta_t);
// taking the second derivative to compute the diffusive tendency
grad_vert_cov(diagnostics -> scalar_field_placeholder, irrev -> friction_acc, grid);
// 3.) horizontal diffusion of vertical velocity
// ---------------------------------------------
// the diffusion coefficient is the same as the one for vertical diffusion of horizontal velocity
// averaging the vertical velocity vertically to cell centers, using the inner product weights
int i;
#pragma omp parallel for private(i)
for (int h_index = 0; h_index < NO_OF_SCALARS_H; ++h_index)
{
for (int layer_index = 0; layer_index < NO_OF_LAYERS; ++layer_index)
{
i = layer_index*NO_OF_SCALARS_H + h_index;
diagnostics -> scalar_field_placeholder[i] =
grid -> inner_product_weights[8*i + 6]*state -> wind[h_index + layer_index*NO_OF_VECTORS_PER_LAYER]
+ grid -> inner_product_weights[8*i + 7]*state -> wind[h_index + (layer_index + 1)*NO_OF_VECTORS_PER_LAYER];
}
}
// computing the horizontal gradient of the vertical velocity field
grad_hor(diagnostics -> scalar_field_placeholder, diagnostics -> vector_field_placeholder, grid);
// multiplying by the already computed diffusion coefficient
#pragma omp parallel for private(vector_index)
for (int h_index = 0; h_index < NO_OF_VECTORS_H; ++h_index)
{
for (int layer_index = 0; layer_index < NO_OF_LAYERS; ++layer_index)
{
vector_index = NO_OF_SCALARS_H + h_index + layer_index*NO_OF_VECTORS_PER_LAYER;
if (layer_index == 0)
{
diagnostics -> vector_field_placeholder[vector_index] = 0.5*irrev -> vert_hor_viscosity[layer_index*NO_OF_VECTORS_H + h_index]
*diagnostics -> vector_field_placeholder[vector_index];
}
else if (layer_index == NO_OF_LAYERS - 1)
{
diagnostics -> vector_field_placeholder[vector_index] = 0.5*irrev -> vert_hor_viscosity[(layer_index - 1)*NO_OF_VECTORS_H + h_index]
*diagnostics -> vector_field_placeholder[vector_index];
}
else
{
diagnostics -> vector_field_placeholder[vector_index] = 0.5
*(irrev -> vert_hor_viscosity[(layer_index - 1)*NO_OF_VECTORS_H + h_index] + irrev -> vert_hor_viscosity[layer_index*NO_OF_VECTORS_H + h_index])
*diagnostics -> vector_field_placeholder[vector_index];
}
}
}
// the divergence of the diffusive flux density results in the diffusive acceleration
divv_h(diagnostics -> vector_field_placeholder, diagnostics -> scalar_field_placeholder, grid);
// vertically averaging the divergence to half levels and dividing by the density
#pragma omp parallel for private(layer_index, h_index, vector_index)
for (int i = 0; i < NO_OF_V_VECTORS - 2*NO_OF_SCALARS_H; ++i)
{
layer_index = i/NO_OF_SCALARS_H;
h_index = i - layer_index*NO_OF_SCALARS_H;
vector_index = h_index + (layer_index + 1)*NO_OF_VECTORS_PER_LAYER;
// finally adding the result
irrev -> friction_acc[vector_index] += 0.5*(
diagnostics -> scalar_field_placeholder[h_index + layer_index*NO_OF_SCALARS_H]
+ diagnostics -> scalar_field_placeholder[h_index + (layer_index + 1)*NO_OF_SCALARS_H]);
// dividing by the density
irrev -> friction_acc[vector_index] = irrev -> friction_acc[vector_index]
/(0.5*(density_gas(state, h_index + layer_index*NO_OF_SCALARS_H) + density_gas(state, h_index + (layer_index + 1)*NO_OF_SCALARS_H)));
}
return 0;
}
int hor_calc_curl_of_vorticity(Curl_field vorticity, double rel_vort_on_triangles[], Vector_field out_field, Grid *grid, Dualgrid *dualgrid)
{
/*
calculates the curl of the vertical vorticity
*/
int layer_index, h_index, vector_index, upper_index_z, lower_index_z, upper_index_zeta, lower_index_zeta, base_index;
double delta_z, delta_x, tangential_slope, delta_zeta, dzeta_dz, checkerboard_damping_weight;
#pragma omp parallel for private(layer_index, h_index, vector_index, delta_z, delta_x, tangential_slope, dzeta_dz, upper_index_z, lower_index_z, upper_index_zeta, lower_index_zeta, checkerboard_damping_weight, base_index)
for (int i = 0; i < NO_OF_H_VECTORS; ++i)
{
// Remember: (curl(zeta))*e_x = dzeta_z/dy - dzeta_y/dz = (dz*dzeta_z - dy*dzeta_y)/(dy*dz) = (dz*dzeta_z - dy*dzeta_y)/area (Stokes' Theorem, which is used here)
layer_index = i/NO_OF_VECTORS_H;
h_index = i - layer_index*NO_OF_VECTORS_H;
vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index;
out_field[vector_index] = 0;
delta_z = 0;
checkerboard_damping_weight =
fabs(rel_vort_on_triangles[layer_index*NO_OF_DUAL_SCALARS_H + dualgrid -> to_index[h_index]]
- rel_vort_on_triangles[layer_index*NO_OF_DUAL_SCALARS_H + dualgrid -> from_index[h_index]])
/(fabs(rel_vort_on_triangles[layer_index*NO_OF_DUAL_SCALARS_H + dualgrid -> to_index[h_index]])
+ fabs(rel_vort_on_triangles[layer_index*NO_OF_DUAL_SCALARS_H + dualgrid -> from_index[h_index]]) + EPSILON_SECURITY);
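// This weight is close to 1 when the two triangle vorticities have opposite
// signs (a checkerboard signal) and close to 0 when they agree, blending
// between the rhombus average and the direct triangle difference below.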
base_index = NO_OF_VECTORS_H + layer_index*NO_OF_DUAL_VECTORS_PER_LAYER;
// horizontal difference of vertical vorticity (dzeta_z*dz)
// An averaging over three rhombi must be done.
for (int j = 0; j < 3; ++j)
{
out_field[vector_index] +=
// This prefactor accounts for the averaging over three rhombi and for the weighting of the triangle vorticities.
+ 1.0/3*(1 - checkerboard_damping_weight)*(
// vertical length at the to_index_dual point
dualgrid -> normal_distance[base_index + dualgrid -> to_index[h_index]]
// vorticity at the to_index_dual point
*vorticity[NO_OF_VECTORS_H + layer_index*2*NO_OF_VECTORS_H + dualgrid -> vorticity_indices_triangles[3*dualgrid -> to_index[h_index] + j]]
// vertical length at the from_index_dual point
- dualgrid -> normal_distance[base_index + dualgrid -> from_index[h_index]]
// vorticity at the from_index_dual point
*vorticity[NO_OF_VECTORS_H + layer_index*2*NO_OF_VECTORS_H + dualgrid -> vorticity_indices_triangles[3*dualgrid -> from_index[h_index] + j]]);
// preparation of the tangential slope
delta_z += 1.0/3*(
grid -> z_vector[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + dualgrid -> vorticity_indices_triangles[3*dualgrid -> to_index[h_index] + j]]
- grid -> z_vector[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + dualgrid -> vorticity_indices_triangles[3*dualgrid -> from_index[h_index] + j]]);
}
// adding the term damping the checkerboard pattern
out_field[vector_index] +=
checkerboard_damping_weight*(rel_vort_on_triangles[layer_index*NO_OF_DUAL_SCALARS_H + dualgrid -> to_index[h_index]]
*dualgrid -> normal_distance[base_index + dualgrid -> to_index[h_index]]
- rel_vort_on_triangles[layer_index*NO_OF_DUAL_SCALARS_H + dualgrid -> from_index[h_index]]
*dualgrid -> normal_distance[base_index + dualgrid -> from_index[h_index]]);
// Dividing by the area.
out_field[vector_index] = out_field[vector_index]/grid -> area[vector_index];
/*
terrain-following correction
*/
if (layer_index >= NO_OF_LAYERS - grid -> no_of_oro_layers)
{
// calculating the tangential slope
delta_x = dualgrid -> normal_distance[NO_OF_DUAL_VECTORS - NO_OF_VECTORS_H + h_index];
delta_x = delta_x*(RADIUS + grid -> z_vector[vector_index])/RADIUS;
tangential_slope = delta_z/delta_x;
// calculating the vertical gradient of the vertical vorticity
upper_index_z = NO_OF_SCALARS_H + (layer_index - 1)*NO_OF_VECTORS_PER_LAYER + h_index;
lower_index_z = NO_OF_SCALARS_H + (layer_index + 1)*NO_OF_VECTORS_PER_LAYER + h_index;
upper_index_zeta = NO_OF_VECTORS_H + (layer_index - 1)*2*NO_OF_VECTORS_H + h_index;
lower_index_zeta = NO_OF_VECTORS_H + (layer_index + 1)*2*NO_OF_VECTORS_H + h_index;
if (layer_index == 0)
{
upper_index_z = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index;
upper_index_zeta = NO_OF_VECTORS_H + layer_index*2*NO_OF_VECTORS_H + h_index;
}
if (layer_index == NO_OF_LAYERS - 1)
{
lower_index_z = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index;
lower_index_zeta = NO_OF_VECTORS_H + layer_index*2*NO_OF_VECTORS_H + h_index;
}
delta_zeta = vorticity[upper_index_zeta] - vorticity[lower_index_zeta];
delta_z = grid -> z_vector[upper_index_z] - grid -> z_vector[lower_index_z];
// the result
dzeta_dz = delta_zeta/delta_z;
out_field[vector_index] -= tangential_slope*dzeta_dz;
}
}
return 0;
}
int simple_dissipation_rate(State *state, Irreversible_quantities *irrev, Grid *grid)
{
/*
calculates a simplified dissipation rate
*/
inner_product(state -> wind, irrev -> friction_acc, irrev -> heating_diss, grid);
#pragma omp parallel for
for (int i = 0; i < NO_OF_SCALARS; ++i)
{
irrev -> heating_diss[i] = -density_gas(state, i)*irrev -> heating_diss[i];
}
return 0;
}
|
OpenMPClause.h | //===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>
namespace clang {
class ASTContext;
//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//
/// This is a basic class for representing a single OpenMP clause.
class OMPClause {
/// Starting location of the clause (the clause keyword).
SourceLocation StartLoc;
/// Ending location of the clause.
SourceLocation EndLoc;
/// Kind of the clause.
OpenMPClauseKind Kind;
protected:
OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
: StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}
public:
/// Returns the starting location of the clause.
SourceLocation getBeginLoc() const { return StartLoc; }
/// Returns the ending location of the clause.
SourceLocation getEndLoc() const { return EndLoc; }
/// Sets the starting location of the clause.
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// Sets the ending location of the clause.
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// Returns kind of OpenMP clause (private, shared, reduction, etc.).
OpenMPClauseKind getClauseKind() const { return Kind; }
bool isImplicit() const { return StartLoc.isInvalid(); }
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<OMPClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Get the iterator range for the expressions used in the clauses. Used
/// expressions include only the children that must be evaluated at the
/// runtime before entering the construct.
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *) { return true; }
};
/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Pre-initialization statement for the clause.
Stmt *PreInit = nullptr;
/// Region that captures the associated stmt.
OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
protected:
OMPClauseWithPreInit(const OMPClause *This) {
assert(get(This) && "get is not tuned for pre-init.");
}
/// Set pre-initialization statement for the clause.
void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = OMPD_unknown) {
PreInit = S;
CaptureRegion = ThisRegion;
}
public:
/// Get pre-initialization statement for the clause.
const Stmt *getPreInitStmt() const { return PreInit; }
/// Get pre-initialization statement for the clause.
Stmt *getPreInitStmt() { return PreInit; }
/// Get capture region for the stmt in the clause.
OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }
static OMPClauseWithPreInit *get(OMPClause *C);
static const OMPClauseWithPreInit *get(const OMPClause *C);
};
/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Post-update expression for the clause.
Expr *PostUpdate = nullptr;
protected:
OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
assert(get(This) && "get is not tuned for post-update.");
}
/// Set pre-initialization statement for the clause.
void setPostUpdateExpr(Expr *S) { PostUpdate = S; }
public:
/// Get post-update expression for the clause.
const Expr *getPostUpdateExpr() const { return PostUpdate; }
/// Get post-update expression for the clause.
Expr *getPostUpdateExpr() { return PostUpdate; }
static OMPClauseWithPostUpdate *get(OMPClause *C);
static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};
/// This structure contains most locations needed by an OMPVarListClause.
struct OMPVarListLocTy {
/// Starting location of the clause (the clause keyword).
SourceLocation StartLoc;
/// Location of '('.
SourceLocation LParenLoc;
/// Ending location of the clause.
SourceLocation EndLoc;
OMPVarListLocTy() = default;
OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {}
};
/// This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Number of variables in the list.
unsigned NumVars;
protected:
/// Build a clause with \a N variables
///
/// \param K Kind of the clause.
/// \param StartLoc Starting location of the clause (the clause keyword).
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
: OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}
/// Fetches list of variables associated with this clause.
MutableArrayRef<Expr *> getVarRefs() {
return MutableArrayRef<Expr *>(
static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars);
}
/// Sets the list of variables for this clause.
void setVarRefs(ArrayRef<Expr *> VL) {
assert(VL.size() == NumVars &&
"Number of variables is not the same as the preallocated buffer");
std::copy(VL.begin(), VL.end(),
static_cast<T *>(this)->template getTrailingObjects<Expr *>());
}
public:
using varlist_iterator = MutableArrayRef<Expr *>::iterator;
using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
using varlist_range = llvm::iterator_range<varlist_iterator>;
using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;
unsigned varlist_size() const { return NumVars; }
bool varlist_empty() const { return NumVars == 0; }
varlist_range varlists() {
return varlist_range(varlist_begin(), varlist_end());
}
varlist_const_range varlists() const {
return varlist_const_range(varlist_begin(), varlist_end());
}
varlist_iterator varlist_begin() { return getVarRefs().begin(); }
varlist_iterator varlist_end() { return getVarRefs().end(); }
varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
varlist_const_iterator varlist_end() const { return getVarRefs().end(); }
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Fetches list of all variables in the clause.
ArrayRef<const Expr *> getVarRefs() const {
return llvm::makeArrayRef(
static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
NumVars);
}
};
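// Usage sketch (illustrative; 'VC' is a hypothetical pointer to a concrete
// subclass such as 'const OMPPrivateClause *'): the variable references,
// typically DeclRefExprs naming the listed variables, are walked with the
// varlist iterators:
//
//   for (const Expr *Ref : VC->varlists())
//     ; // one reference per variable in the clause's list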
/// This represents 'allocator' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp allocate(a) allocator(omp_default_mem_alloc)
/// \endcode
/// In this example directive '#pragma omp allocate' has simple 'allocator'
/// clause with the allocator 'omp_default_mem_alloc'.
class OMPAllocatorClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Expression with the allocator.
Stmt *Allocator = nullptr;
/// Set allocator.
void setAllocator(Expr *A) { Allocator = A; }
public:
/// Build 'allocator' clause with the given allocator.
///
/// \param A Allocator.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc),
Allocator(A) {}
/// Build an empty clause.
OMPAllocatorClause()
: OMPClause(OMPC_allocator, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns allocator.
Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); }
child_range children() { return child_range(&Allocator, &Allocator + 1); }
const_child_range children() const {
return const_child_range(&Allocator, &Allocator + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_allocator;
}
};
/// This represents clause 'allocate' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// and clause 'allocate' for the variable 'a'.
class OMPAllocateClause final
: public OMPVarListClause<OMPAllocateClause>,
private llvm::TrailingObjects<OMPAllocateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Allocator specified in the clause, or 'nullptr' if the default one is
/// used.
Expr *Allocator = nullptr;
/// Position of the ':' delimiter in the clause.
SourceLocation ColonLoc;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Allocator Allocator expression.
/// \param ColonLoc Location of ':' delimiter.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
Expr *Allocator, SourceLocation ColonLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPAllocateClause>(OMPC_allocate, StartLoc, LParenLoc,
EndLoc, N),
Allocator(Allocator), ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPAllocateClause(unsigned N)
: OMPVarListClause<OMPAllocateClause>(OMPC_allocate, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
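/// Set allocator.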
void setAllocator(Expr *A) { Allocator = A; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Allocator Allocator expression.
/// \param ColonLoc Location of ':' delimiter.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, Expr *Allocator,
SourceLocation ColonLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL);
/// Returns the allocator expression, or nullptr if none is specified.
Expr *getAllocator() const { return Allocator; }
/// Returns the location of the ':' delimiter.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPAllocateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_allocate;
}
};
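// Construction sketch (illustrative; 'Ctx', 'AllocExpr', 'Vars' and the
// source locations are hypothetical locals of a Sema-style caller):
//
//   OMPAllocateClause *AC = OMPAllocateClause::Create(
//       Ctx, StartLoc, LParenLoc, AllocExpr, ColonLoc, EndLoc, Vars);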
/// This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Condition of the 'if' clause.
Stmt *Condition = nullptr;
/// Location of ':' (if any).
SourceLocation ColonLoc;
/// Directive name modifier for the clause.
OpenMPDirectiveKind NameModifier = OMPD_unknown;
/// Name modifier location.
SourceLocation NameModifierLoc;
/// Set condition.
void setCondition(Expr *Cond) { Condition = Cond; }
/// Set directive name modifier for the clause.
void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }
/// Set location of directive name modifier for the clause.
void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }
/// Set location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Build 'if' clause with condition \a Cond.
///
/// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
/// \param Cond Condition of the clause.
/// \param HelperCond Helper condition for the clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param NameModifierLoc Location of directive name modifier.
/// \param ColonLoc [OpenMP 4.1] Location of ':'.
/// \param EndLoc Ending location of the clause.
OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation NameModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc)
: OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc),
NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) {
setPreInitStmt(HelperCond, CaptureRegion);
}
/// Build an empty clause.
OMPIfClause()
: OMPClause(OMPC_if, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Returns condition.
Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
/// Return directive name modifier associated with the clause.
OpenMPDirectiveKind getNameModifier() const { return NameModifier; }
/// Return the location of directive name modifier.
SourceLocation getNameModifierLoc() const { return NameModifierLoc; }
child_range children() { return child_range(&Condition, &Condition + 1); }
const_child_range children() const {
return const_child_range(&Condition, &Condition + 1);
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPIfClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_if;
}
};
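// Usage sketch (illustrative; 'IC' is a hypothetical 'const OMPIfClause *'):
// on combined directives the name modifier selects which region an 'if'
// clause applies to, so a consumer handling the parallel region checks:
//
//   if (IC->getNameModifier() == OMPD_parallel ||
//       IC->getNameModifier() == OMPD_unknown)
//     ; // this 'if' clause governs the parallel region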
/// This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Condition of the 'final' clause.
Stmt *Condition = nullptr;
/// Set condition.
void setCondition(Expr *Cond) { Condition = Cond; }
public:
/// Build 'final' clause with condition \a Cond.
///
/// \param Cond Condition of the clause.
/// \param HelperCond Helper condition for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPFinalClause(Expr *Cond, Stmt *HelperCond,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_final, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), Condition(Cond) {
setPreInitStmt(HelperCond, CaptureRegion);
}
/// Build an empty clause.
OMPFinalClause()
: OMPClause(OMPC_final, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns condition.
Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
child_range children() { return child_range(&Condition, &Condition + 1); }
const_child_range children() const {
return const_child_range(&Condition, &Condition + 1);
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPFinalClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_final;
}
};
/// This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Number of threads specified by the 'num_threads' clause.
Stmt *NumThreads = nullptr;
/// Set number of threads.
void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }
public:
/// Build 'num_threads' clause with the expression \a NumThreads.
///
/// \param NumThreads Number of threads for the construct.
/// \param HelperNumThreads Helper Number of threads for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_num_threads, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
NumThreads(NumThreads) {
setPreInitStmt(HelperNumThreads, CaptureRegion);
}
/// Build an empty clause.
OMPNumThreadsClause()
: OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns number of threads.
Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }
child_range children() { return child_range(&NumThreads, &NumThreads + 1); }
const_child_range children() const {
return const_child_range(&NumThreads, &NumThreads + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_num_threads;
}
};
/// This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Safe iteration space distance.
Stmt *Safelen = nullptr;
/// Set safelen.
void setSafelen(Expr *Len) { Safelen = Len; }
public:
/// Build 'safelen' clause.
///
/// \param Len Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
Safelen(Len) {}
/// Build an empty clause.
explicit OMPSafelenClause()
: OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return safe iteration space distance.
Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }
child_range children() { return child_range(&Safelen, &Safelen + 1); }
const_child_range children() const {
return const_child_range(&Safelen, &Safelen + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_safelen;
}
};
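// Usage sketch (illustrative; 'SC' is a hypothetical
// 'const OMPSafelenClause *' and 'Ctx' an ASTContext): because the argument
// must be a constant positive integer, a consumer can fold it to a value:
//
//   if (const Expr *Len = SC->getSafelen())
//     llvm::APSInt Width = Len->EvaluateKnownConstInt(Ctx);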
/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Preferred number of iterations executed concurrently.
Stmt *Simdlen = nullptr;
/// Set simdlen.
void setSimdlen(Expr *Len) { Simdlen = Len; }
public:
/// Build 'simdlen' clause.
///
/// \param Len Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
Simdlen(Len) {}
/// Build an empty clause.
explicit OMPSimdlenClause()
: OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the preferred number of iterations executed concurrently.
Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }
child_range children() { return child_range(&Simdlen, &Simdlen + 1); }
const_child_range children() const {
return const_child_range(&Simdlen, &Simdlen + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_simdlen;
}
};
/// This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression; it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Number of for-loops.
Stmt *NumForLoops = nullptr;
/// Set the number of associated for-loops.
void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
/// Build 'collapse' clause.
///
/// \param Num Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumForLoops(Num) {}
/// Build an empty clause.
explicit OMPCollapseClause()
: OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the number of associated for-loops.
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
const_child_range children() const {
return const_child_range(&NumForLoops, &NumForLoops + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_collapse;
}
};
/// This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// A kind of the 'default' clause.
OpenMPDefaultClauseKind Kind = OMPC_DEFAULT_unknown;
/// Start location of the kind in source code.
SourceLocation KindKwLoc;
/// Set kind of the clause.
///
/// \param K Kind of the clause.
void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }
/// Set argument location.
///
/// \param KLoc Argument location.
void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
/// Build 'default' clause with argument \a A ('none' or 'shared').
///
/// \param A Argument of the clause ('none' or 'shared').
/// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
Kind(A), KindKwLoc(ALoc) {}
/// Build an empty clause.
OMPDefaultClause()
: OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns kind of the clause.
OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }
/// Returns location of clause kind.
SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_default;
}
};
/// This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// A kind of the 'proc_bind' clause.
OpenMPProcBindClauseKind Kind = OMPC_PROC_BIND_unknown;
/// Start location of the kind in source code.
SourceLocation KindKwLoc;
/// Set kind of the clause.
///
/// \param K Kind of clause.
void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; }
/// Set clause kind location.
///
/// \param KLoc Kind location.
void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
/// Build 'proc_bind' clause with argument \a A ('master', 'close' or
/// 'spread').
///
/// \param A Argument of the clause ('master', 'close' or 'spread').
/// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
Kind(A), KindKwLoc(ALoc) {}
/// Build an empty clause.
OMPProcBindClause()
: OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns kind of the clause.
OpenMPProcBindClauseKind getProcBindKind() const { return Kind; }
/// Returns location of clause kind.
SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_proc_bind;
}
};
/// This represents 'unified_address' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_address'
/// clause.
class OMPUnifiedAddressClause final : public OMPClause {
public:
friend class OMPClauseReader;
/// Build 'unified_address' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_unified_address, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPUnifiedAddressClause()
: OMPClause(OMPC_unified_address, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_unified_address;
}
};
/// This represents 'unified_shared_memory' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_shared_memory
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_shared_memory'
/// clause.
class OMPUnifiedSharedMemoryClause final : public OMPClause {
public:
friend class OMPClauseReader;
/// Build 'unified_shared_memory' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_unified_shared_memory, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPUnifiedSharedMemoryClause()
: OMPClause(OMPC_unified_shared_memory, SourceLocation(),
            SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_unified_shared_memory;
}
};
/// This represents 'reverse_offload' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires reverse_offload
/// \endcode
/// In this example directive '#pragma omp requires' has 'reverse_offload'
/// clause.
class OMPReverseOffloadClause final : public OMPClause {
public:
friend class OMPClauseReader;
/// Build 'reverse_offload' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_reverse_offload, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPReverseOffloadClause()
: OMPClause(OMPC_reverse_offload, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_reverse_offload;
}
};
/// This represents 'dynamic_allocators' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires dynamic_allocators
/// \endcode
/// In this example directive '#pragma omp requires' has 'dynamic_allocators'
/// clause.
class OMPDynamicAllocatorsClause final : public OMPClause {
public:
friend class OMPClauseReader;
/// Build 'dynamic_allocators' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_dynamic_allocators, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPDynamicAllocatorsClause()
: OMPClause(OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_dynamic_allocators;
}
};
/// This represents 'atomic_default_mem_order' clause in the '#pragma omp
/// requires' directive.
///
/// \code
/// #pragma omp requires atomic_default_mem_order(seq_cst)
/// \endcode
/// In this example directive '#pragma omp requires' has simple
/// 'atomic_default_mem_order' clause with kind 'seq_cst'.
class OMPAtomicDefaultMemOrderClause final : public OMPClause {
friend class OMPClauseReader;
/// Location of '('
SourceLocation LParenLoc;
/// A kind of the 'atomic_default_mem_order' clause.
OpenMPAtomicDefaultMemOrderClauseKind Kind =
OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;
/// Start location of the kind in source code.
SourceLocation KindKwLoc;
/// Set kind of the clause.
///
/// \param K Kind of clause.
void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
Kind = K;
}
/// Set clause kind location.
///
/// \param KLoc Kind location.
void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
KindKwLoc = KLoc;
}
public:
/// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
/// 'acq_rel' or 'relaxed').
///
/// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
/// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
SourceLocation ALoc, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_atomic_default_mem_order, StartLoc, EndLoc),
LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
/// Build an empty clause.
OMPAtomicDefaultMemOrderClause()
: OMPClause(OMPC_atomic_default_mem_order, SourceLocation(),
SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns kind of the clause.
OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
return Kind;
}
/// Returns location of clause kind.
SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; }
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_atomic_default_mem_order;
}
};
/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// A kind of the 'schedule' clause.
OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;
/// Modifiers for 'schedule' clause.
enum { FIRST, SECOND, NUM_MODIFIERS };
OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];
/// Locations of modifiers.
SourceLocation ModifiersLoc[NUM_MODIFIERS];
/// Start location of the schedule kind in source code.
SourceLocation KindLoc;
/// Location of ',' (if any).
SourceLocation CommaLoc;
/// Chunk size.
Expr *ChunkSize = nullptr;
/// Set schedule kind.
///
/// \param K Schedule kind.
void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }
/// Set the first schedule modifier.
///
/// \param M Schedule modifier.
void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
Modifiers[FIRST] = M;
}
/// Set the second schedule modifier.
///
/// \param M Schedule modifier.
void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
Modifiers[SECOND] = M;
}
/// Set location of the first schedule modifier.
void setFirstScheduleModifierLoc(SourceLocation Loc) {
ModifiersLoc[FIRST] = Loc;
}
/// Set location of the second schedule modifier.
void setSecondScheduleModifierLoc(SourceLocation Loc) {
ModifiersLoc[SECOND] = Loc;
}
/// Set the next unset schedule modifier.
///
/// \param M Schedule modifier.
void setScheduleModifer(OpenMPScheduleClauseModifier M) {
if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
Modifiers[FIRST] = M;
else {
assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
Modifiers[SECOND] = M;
}
}
/// Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Set schedule kind start location.
///
/// \param KLoc Schedule kind location.
void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
/// Set location of ','.
///
/// \param Loc Location of ','.
void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
/// Set chunk size.
///
/// \param E Chunk size.
void setChunkSize(Expr *E) { ChunkSize = E; }
public:
/// Build 'schedule' clause with schedule kind \a Kind and chunk size
/// expression \a ChunkSize.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param CommaLoc Location of ','.
/// \param EndLoc Ending location of the clause.
/// \param Kind Schedule kind.
/// \param ChunkSize Chunk size.
/// \param HelperChunkSize Helper chunk size for combined directives.
/// \param M1 The first modifier applied to 'schedule' clause.
/// \param M1Loc Location of the first modifier.
/// \param M2 The second modifier applied to 'schedule' clause.
/// \param M2Loc Location of the second modifier.
OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation KLoc, SourceLocation CommaLoc,
SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, Stmt *HelperChunkSize,
OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
: OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc),
ChunkSize(ChunkSize) {
setPreInitStmt(HelperChunkSize);
Modifiers[FIRST] = M1;
Modifiers[SECOND] = M2;
ModifiersLoc[FIRST] = M1Loc;
ModifiersLoc[SECOND] = M2Loc;
}
/// Build an empty clause.
explicit OMPScheduleClause()
: OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {
Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
}
/// Get kind of the clause.
OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }
/// Get the first modifier of the clause.
OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
return Modifiers[FIRST];
}
/// Get the second modifier of the clause.
OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
return Modifiers[SECOND];
}
/// Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }
/// Get kind location.
SourceLocation getScheduleKindLoc() { return KindLoc; }
/// Get the first modifier location.
SourceLocation getFirstScheduleModifierLoc() const {
return ModifiersLoc[FIRST];
}
/// Get the second modifier location.
SourceLocation getSecondScheduleModifierLoc() const {
return ModifiersLoc[SECOND];
}
/// Get location of ','.
SourceLocation getCommaLoc() { return CommaLoc; }
/// Get chunk size.
Expr *getChunkSize() { return ChunkSize; }
/// Get chunk size.
const Expr *getChunkSize() const { return ChunkSize; }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
reinterpret_cast<Stmt **>(&ChunkSize) + 1);
}
const_child_range children() const {
auto Children = const_cast<OMPScheduleClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_schedule;
}
};
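// Usage sketch (illustrative; 'SC' is a hypothetical 'OMPScheduleClause *'
// built from 'schedule(monotonic: dynamic, 4)'):
//
//   OpenMPScheduleClauseKind K = SC->getScheduleKind(); // OMPC_SCHEDULE_dynamic
//   OpenMPScheduleClauseModifier M1 =
//       SC->getFirstScheduleModifier(); // OMPC_SCHEDULE_MODIFIER_monotonic
//   Expr *Chunk = SC->getChunkSize();   // the '4' expression, or nullptr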
/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
: public OMPClause,
private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
friend class OMPClauseReader;
friend TrailingObjects;
/// Location of '('.
SourceLocation LParenLoc;
/// Number of for-loops.
Stmt *NumForLoops = nullptr;
/// Real number of loops.
unsigned NumberOfLoops = 0;
/// Build 'ordered' clause.
///
/// \param Num Expression, possibly associated with this clause.
/// \param NumLoops Number of loops, associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumForLoops(Num), NumberOfLoops(NumLoops) {}
/// Build an empty clause.
explicit OMPOrderedClause(unsigned NumLoops)
: OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()),
NumberOfLoops(NumLoops) {}
/// Set the number of associated for-loops.
void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
/// Build 'ordered' clause.
///
/// \param Num Expression, possibly associated with this clause.
/// \param NumLoops Number of loops, associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
unsigned NumLoops, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Build an empty clause.
static OMPOrderedClause *CreateEmpty(const ASTContext &C, unsigned NumLoops);
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the number of associated for-loops.
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
/// Set number of iterations for the specified loop.
void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
/// Get number of iterations for all the loops.
ArrayRef<Expr *> getLoopNumIterations() const;
/// Set loop counter for the specified loop.
void setLoopCounter(unsigned NumLoop, Expr *Counter);
/// Get loop counter for the specified loop.
Expr *getLoopCounter(unsigned NumLoop);
const Expr *getLoopCounter(unsigned NumLoop) const;
child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
const_child_range children() const {
return const_child_range(&NumForLoops, &NumForLoops + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_ordered;
}
};
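// Construction sketch (illustrative; 'Ctx', 'NumExpr' and the locations are
// hypothetical locals): Create() reserves trailing storage sized from
// NumLoops, which is then filled per loop:
//
//   OMPOrderedClause *OC = OMPOrderedClause::Create(
//       Ctx, NumExpr, /*NumLoops=*/2, StartLoc, LParenLoc, EndLoc);
//   OC->setLoopNumIterations(0, IterCount0); // hypothetical Expr *
//   OC->setLoopCounter(0, Counter0);         // hypothetical Expr *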
/// This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
/// Build 'nowait' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_nowait, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPNowaitClause()
: OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_nowait;
}
};
/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
/// Build 'untied' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_untied, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPUntiedClause()
: OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_untied;
}
};
/// This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
/// Build 'mergeable' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPMergeableClause()
: OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_mergeable;
}
};
/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
/// Build 'read' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_read, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_read;
}
};
/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
/// Build 'write' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_write, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPWriteClause()
: OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_write;
}
};
/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
class OMPUpdateClause : public OMPClause {
public:
/// Build 'update' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_update, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPUpdateClause()
: OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_update;
}
};
/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
/// Build 'capture' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_capture, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPCaptureClause()
: OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_capture;
}
};
/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
/// Build 'seq_cst' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPSeqCstClause()
: OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_seq_cst;
}
};
/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
: public OMPVarListClause<OMPPrivateClause>,
private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPPrivateClause(unsigned N)
: OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
/// Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PrivateVL List of references to private copies with initializers.
static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PrivateVL);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPPrivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_private;
}
};
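// Usage sketch (illustrative; 'PC' is a hypothetical 'OMPPrivateClause *'):
// the original references and their private copies are parallel lists of
// equal length and are naturally consumed in lock-step:
//
//   auto CopyI = PC->private_copies().begin();
//   for (Expr *OrigRef : PC->varlists())
//     handlePrivate(OrigRef, *CopyI++); // handlePrivate is a hypothetical hook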
/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
: public OMPVarListClause<OMPFirstprivateClause>,
public OMPClauseWithPreInit,
private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPreInit(this) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFirstprivateClause(unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(
OMPC_firstprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPreInit(this) {}
/// Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new
/// private variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// Gets the list of references to initializer variables for new
/// private variables.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the original variables.
/// \param PrivateVL List of references to private copies with initializers.
/// \param InitVL List of references to auto generated variables used for
/// initialization of a single array element. Used if firstprivate variable is
/// of array type.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
static OMPFirstprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
ArrayRef<Expr *> InitVL, Stmt *PreInit);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
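  // Illustrative traversal (a hypothetical consumer; 'FC' is assumed to point
  // to a fully built OMPFirstprivateClause):
  //
  //   for (Expr *Copy : FC->private_copies())
  //     ; // one pseudo-declaration per firstprivate variable, in order
  //   for (Expr *Init : FC->inits())
  //     ; // the matching initializer helper (used for array-typed variables)
  //
  // The children() accessors below reinterpret the trailing Expr ** storage
  // as Stmt **; this relies on Expr deriving from Stmt, so the pointer values
  // are unchanged.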
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range used_children() const {
auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_firstprivate;
}
};
/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
: public OMPVarListClause<OMPLastprivateClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. A list of pseudo-variables with the default initialization for each
  // non-firstprivate variable. Used in codegen for the initialization of
  // lastprivate copies.
  // 2. A list of helper expressions for proper generation of the assignment
  // operation required for the lastprivate clause. This list represents the
  // private variables (for arrays, a single array element).
  // 3. A list of helper expressions for proper generation of the assignment
  // operation required for the lastprivate clause. This list represents the
  // original variables (for arrays, a single array element).
  // 4. A list of helper expressions that represent the assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of the final assignment performed by the
  // lastprivate clause.
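  //
  // Assuming the layout implied by the accessors below, the trailing storage
  // is therefore (each array holds varlist_size() elements):
  //
  // { Vars[] /* in OMPVarListClause */; PrivateCopies[]; SrcExprs[];
  //   DstExprs[]; AssignmentOps[]; }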
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPLastprivateClause(unsigned N)
: OMPVarListClause<OMPLastprivateClause>(
OMPC_lastprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Get the list of helper expressions for initialization of private
/// copies for lastprivate variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent the private variables (for arrays, a
  /// single array element) in the final assignment statement performed by the
  /// lastprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent the original variables (for arrays,
  /// a single array element) in the final assignment statement performed by
  /// the lastprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign the private copy of each variable to the original variable.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of the
  /// assignment operation required for the lastprivate clause. This list
  /// represents the private variables (for arrays, a single array element).
  /// \param DstExprs List of helper expressions for proper generation of the
  /// assignment operation required for the lastprivate clause. This list
  /// represents the original variables (for arrays, a single array element).
  /// \param AssignmentOps List of helper expressions that represent the
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of the final assignment performed by the
  /// lastprivate clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLastprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// Set list of helper expressions, required for generation of private
/// copies of original lastprivate variables.
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);
helper_expr_const_range private_copies() const {
return helper_expr_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_range private_copies() {
return helper_expr_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
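  // Note: per the class description above, the helper lists are aligned by
  // index: for each i, AssignmentOps[i] represents the final
  // DstExprs[i] = SrcExprs[i] store for the i-th lastprivate variable.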
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPLastprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_lastprivate;
}
};
/// This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final
: public OMPVarListClause<OMPSharedClause>,
private llvm::TrailingObjects<OMPSharedClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc,
EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPSharedClause(unsigned N)
: OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPSharedClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_shared;
}
};
/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
: public OMPVarListClause<OMPReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information.
/// \param NameInfo The full name info for reduction identifier.
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPReductionClause(unsigned N)
: OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
SourceLocation(), SourceLocation(),
N),
OMPClauseWithPostUpdate(this) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent the private copies of the reduction
  /// variables.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent the LHS expressions in the final
  /// reduction expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent the RHS expressions in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);
  /// Get the list of helper RHS expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction calls that compute the new value from the
  /// source helper expressions and store it in the destination helper
  /// expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
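  // Trailing storage layout (a sketch inferred from the accessors above; each
  // array holds varlist_size() elements):
  //
  //   { Vars[] /* in OMPVarListClause */; Privates[]; LHSExprs[]; RHSExprs[];
  //     ReductionOps[]; }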
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information.
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of the
  /// assignment operation required for the reduction clause. This list
  /// represents the LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of the
  /// assignment operation required for the reduction clause. This list
  /// represents the RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represent the
  /// reduction expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of the final reduction operation performed
  /// by the reduction clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPReductionClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range used_children() const {
auto Children = const_cast<OMPReductionClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_reduction;
}
};
/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
: public OMPVarListClause<OMPTaskReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information.
/// \param NameInfo The full name info for reduction identifier.
OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned N, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPTaskReductionClause(unsigned N)
: OMPVarListClause<OMPTaskReductionClause>(
OMPC_task_reduction, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent the private copies of the reduction variables.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent the LHS expressions in the final reduction
  /// expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent the RHS expressions in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);
  /// Get the list of helper RHS expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction calls that compute the new value from the
  /// source helper expressions and store it in the destination helper
  /// expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
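  // Trailing storage layout (a sketch inferred from the accessors above; each
  // array holds varlist_size() elements):
  //
  //   { Vars[] /* in OMPVarListClause */; Privates[]; LHSExprs[]; RHSExprs[];
  //     ReductionOps[]; }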
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information.
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of the
  /// assignment operation required for the task_reduction clause. This list
  /// represents the LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of the
  /// assignment operation required for the task_reduction clause. This list
  /// represents the RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represent the
  /// reduction expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of the final reduction operation performed
  /// by the task_reduction clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPTaskReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_task_reduction;
}
};
/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
: public OMPVarListClause<OMPInReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information.
/// \param NameInfo The full name info for reduction identifier.
OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned N, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPInReductionClause(unsigned N)
: OMPVarListClause<OMPInReductionClause>(
OMPC_in_reduction, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent the private copies of the reduction variables.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent the LHS expressions in the final reduction
  /// expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent the RHS expressions in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);
  /// Get the list of helper RHS expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction calls that compute the new value from the
  /// source helper expressions and store it in the destination helper
  /// expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
/// Set list of helper reduction taskgroup descriptors.
void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction taskgroup descriptors.
MutableArrayRef<Expr *> getTaskgroupDescriptors() {
return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
}
ArrayRef<const Expr *> getTaskgroupDescriptors() const {
return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
}
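  // Trailing storage layout (a sketch inferred from the accessors above; each
  // array holds varlist_size() elements):
  //
  //   { Vars[] /* in OMPVarListClause */; Privates[]; LHSExprs[]; RHSExprs[];
  //     ReductionOps[]; TaskgroupDescriptors[]; }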
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information.
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of the
  /// assignment operation required for the in_reduction clause. This list
  /// represents the LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of the
  /// assignment operation required for the in_reduction clause. This list
  /// represents the RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represent the
  /// reduction expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of the final reduction operation performed
  /// by the in_reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for the
  /// corresponding items in the parent taskgroup's task_reduction clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPInReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_const_range taskgroup_descriptors() const {
return helper_expr_const_range(getTaskgroupDescriptors().begin(),
getTaskgroupDescriptors().end());
}
helper_expr_range taskgroup_descriptors() {
return helper_expr_range(getTaskgroupDescriptors().begin(),
getTaskgroupDescriptors().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPInReductionClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_in_reduction;
}
};
/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
: public OMPVarListClause<OMPLinearClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLinearClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Modifier of 'linear' clause.
OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;
/// Location of linear modifier if any.
SourceLocation ModifierLoc;
/// Location of ':'.
SourceLocation ColonLoc;
  /// Sets the linear step for the clause.
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }
  /// Sets the expression used to calculate the linear step for the clause.
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }
  /// Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of the 'linear' clause.
  /// \param ModifierLoc Location of the linear modifier, if any.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
EndLoc, NumVars),
OMPClauseWithPostUpdate(this), Modifier(Modifier),
ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPLinearClause(unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
SourceLocation(), SourceLocation(),
NumVars),
OMPClauseWithPostUpdate(this) {}
  /// Gets the list of private copies of the linear variables.
  ///
  /// There are NumVars expressions with the private copies allocated after
  /// the varlist; they are followed by NumVars initial-value expressions,
  /// NumVars update expressions (used to update the linear variable's value
  /// on the current iteration), and NumVars final expressions (used to
  /// calculate the linear variable's value after the loop body). After these
  /// lists, there are 2 helper expressions - the linear step and a helper to
  /// calculate it before the loop body (used when the linear step is not
  /// constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  /// Finals[]; Step; CalcStep; }
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
  /// Gets the list of update expressions for linear variables.
MutableArrayRef<Expr *> getUpdates() {
return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
}
ArrayRef<const Expr *> getUpdates() const {
return llvm::makeArrayRef(getInits().end(), varlist_size());
}
  /// Gets the list of final update expressions for linear variables.
MutableArrayRef<Expr *> getFinals() {
return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
}
ArrayRef<const Expr *> getFinals() const {
return llvm::makeArrayRef(getUpdates().end(), varlist_size());
}
/// Gets the list of used expressions for linear variables.
MutableArrayRef<Expr *> getUsedExprs() {
return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
}
ArrayRef<const Expr *> getUsedExprs() const {
return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
}
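  // Note: the '+ 2' skips the Step and CalcStep slots shown in the layout
  // above; the 'varlist_size() + 1' extent is assumed to reserve one extra
  // slot beyond the variable list (e.g. for the step expression).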
/// Sets the list of the copies of original linear variables.
/// \param PL List of expressions.
void setPrivates(ArrayRef<Expr *> PL);
/// Sets the list of the initial values for linear variables.
/// \param IL List of expressions.
void setInits(ArrayRef<Expr *> IL);
public:
/// Creates clause with a list of variables \a VL and a linear step
/// \a Step.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Modifier Modifier of 'linear' clause.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PL List of private copies of original variables.
/// \param IL List of initial values for the variables.
/// \param Step Linear step.
/// \param CalcStep Calculation of the linear step.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLinearClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// Set modifier.
void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }
/// Return modifier.
OpenMPLinearClauseKind getModifier() const { return Modifier; }
/// Set modifier location.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
/// Return modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }
/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Returns linear step.
Expr *getStep() { return *(getFinals().end()); }
/// Returns linear step.
const Expr *getStep() const { return *(getFinals().end()); }
/// Returns expression to calculate linear step.
Expr *getCalcStep() { return *(getFinals().end() + 1); }
/// Returns expression to calculate linear step.
const Expr *getCalcStep() const { return *(getFinals().end() + 1); }
/// Sets the list of update expressions for linear variables.
/// \param UL List of expressions.
void setUpdates(ArrayRef<Expr *> UL);
/// Sets the list of final update expressions for linear variables.
/// \param FL List of expressions.
void setFinals(ArrayRef<Expr *> FL);
/// Sets the list of used expressions for the linear clause.
void setUsedExprs(ArrayRef<Expr *> UE);
using privates_iterator = MutableArrayRef<Expr *>::iterator;
using privates_const_iterator = ArrayRef<const Expr *>::iterator;
using privates_range = llvm::iterator_range<privates_iterator>;
using privates_const_range = llvm::iterator_range<privates_const_iterator>;
privates_range privates() {
return privates_range(getPrivates().begin(), getPrivates().end());
}
privates_const_range privates() const {
return privates_const_range(getPrivates().begin(), getPrivates().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
using updates_iterator = MutableArrayRef<Expr *>::iterator;
using updates_const_iterator = ArrayRef<const Expr *>::iterator;
using updates_range = llvm::iterator_range<updates_iterator>;
using updates_const_range = llvm::iterator_range<updates_const_iterator>;
updates_range updates() {
return updates_range(getUpdates().begin(), getUpdates().end());
}
updates_const_range updates() const {
return updates_const_range(getUpdates().begin(), getUpdates().end());
}
using finals_iterator = MutableArrayRef<Expr *>::iterator;
using finals_const_iterator = ArrayRef<const Expr *>::iterator;
using finals_range = llvm::iterator_range<finals_iterator>;
using finals_const_range = llvm::iterator_range<finals_const_iterator>;
finals_range finals() {
return finals_range(getFinals().begin(), getFinals().end());
}
finals_const_range finals() const {
return finals_const_range(getFinals().begin(), getFinals().end());
}
using used_expressions_iterator = MutableArrayRef<Expr *>::iterator;
using used_expressions_const_iterator = ArrayRef<const Expr *>::iterator;
using used_expressions_range =
llvm::iterator_range<used_expressions_iterator>;
using used_expressions_const_range =
llvm::iterator_range<used_expressions_const_iterator>;
  used_expressions_range used_expressions() {
    return used_expressions_range(getUsedExprs().begin(),
                                  getUsedExprs().end());
  }
  used_expressions_const_range used_expressions() const {
    return used_expressions_const_range(getUsedExprs().begin(),
                                        getUsedExprs().end());
  }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPLinearClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPLinearClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_linear;
}
};
/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
: public OMPVarListClause<OMPAlignedClause>,
private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
  /// Sets the alignment for the clause.
void setAlignment(Expr *A) { *varlist_end() = A; }
/// Build 'aligned' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
EndLoc, NumVars),
ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPAlignedClause(unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
SourceLocation(), SourceLocation(),
NumVars) {}
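  // Trailing storage layout (a sketch inferred from setAlignment() and the
  // getAlignment() accessors below; a single alignment expression is stored
  // right after the variable list):
  //
  //   { Vars[] /* in OMPVarListClause */; Alignment; }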
public:
/// Creates clause with a list of variables \a VL and alignment \a A.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param A Alignment.
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
Expr *A);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Returns alignment.
Expr *getAlignment() { return *varlist_end(); }
/// Returns alignment.
const Expr *getAlignment() const { return *varlist_end(); }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPAlignedClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_aligned;
}
};
/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
: public OMPVarListClause<OMPCopyinClause>,
private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // The class has 3 additional tail-allocated arrays:
  // 1. A list of helper expressions for proper generation of the assignment
  // operation required for the copyin clause. This list represents the
  // sources.
  // 2. A list of helper expressions for proper generation of the assignment
  // operation required for the copyin clause. This list represents the
  // destinations.
  // 3. A list of helper expressions that represent the assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of the propagation of the master thread's
  // values of threadprivate variables to the local instances of those
  // variables in other implicit threads.
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyinClause(unsigned N)
: OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent the source expressions in the final
  /// assignment statement performed by the copyin clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent the destination expressions in the
  /// final assignment statement performed by the copyin clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign the source helper expressions to the corresponding destination
  /// helper expressions.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
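  // Trailing storage layout (a sketch inferred from the accessors above; each
  // array holds varlist_size() elements):
  //
  //   { Vars[] /* in OMPVarListClause */; SrcExprs[]; DstExprs[];
  //     AssignmentOps[]; }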
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of the
  /// assignment operation required for the copyin clause. This list
  /// represents the sources.
  /// \param DstExprs List of helper expressions for proper generation of the
  /// assignment operation required for the copyin clause. This list
  /// represents the destinations.
  /// \param AssignmentOps List of helper expressions that represent the
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of the propagation of the master thread's
  /// values of threadprivate variables to the local instances of those
  /// variables in other implicit threads.
static OMPCopyinClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPCopyinClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_copyin;
}
};
/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
: public OMPVarListClause<OMPCopyprivateClause>,
private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyprivateClause(unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(
OMPC_copyprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent the source expressions in the final
/// assignment statements performed by the copyprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent the destination expressions in the
/// final assignment statements performed by the copyprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represent the
/// assignment operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// copyprivate clause.
static OMPCopyprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_copyprivate;
}
};
/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself; it can only appear as part of the
/// 'omp flush' directive. It is introduced to keep the original structure of
/// the \a OMPExecutableDirective class and its derivatives and to reuse the
/// existing infrastructure for clauses with a list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
: public OMPVarListClause<OMPFlushClause>,
private llvm::TrailingObjects<OMPFlushClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFlushClause(unsigned N)
: OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPFlushClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_flush;
}
};
/// This represents clause 'depend' in the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'depend' with the
/// variables 'a' and 'b' and dependency type 'in'.
class OMPDependClause final
: public OMPVarListClause<OMPDependClause>,
private llvm::TrailingObjects<OMPDependClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Dependency type (e.g. in, out, inout).
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
/// Dependency type location.
SourceLocation DepLoc;
/// Colon location.
SourceLocation ColonLoc;
/// Number of loops, associated with the depend clause.
unsigned NumLoops = 0;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
EndLoc, N), NumLoops(NumLoops) {}
/// Build an empty clause.
///
/// \param N Number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
explicit OMPDependClause(unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
SourceLocation(), SourceLocation(),
N),
NumLoops(NumLoops) {}
/// Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
/// Set dependency type location.
void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param DepKind Dependency type.
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VL, unsigned NumLoops);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
unsigned NumLoops);
/// Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
/// Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Get number of loops associated with the clause.
unsigned getNumLoops() const { return NumLoops; }
/// Set the loop data for the depend clauses with 'sink|source' kind of
/// dependency.
void setLoopData(unsigned NumLoop, Expr *Cnt);
/// Get the loop data.
Expr *getLoopData(unsigned NumLoop);
const Expr *getLoopData(unsigned NumLoop) const;
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPDependClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_depend;
}
};
/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Device number.
Stmt *Device = nullptr;
/// Set the device number.
///
/// \param E Device number.
void setDevice(Expr *E) { Device = E; }
public:
/// Build 'device' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDeviceClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), Device(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPDeviceClause()
: OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return device number.
Expr *getDevice() { return cast<Expr>(Device); }
/// Return device number.
Expr *getDevice() const { return cast<Expr>(Device); }
child_range children() { return child_range(&Device, &Device + 1); }
const_child_range children() const {
return const_child_range(&Device, &Device + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_device;
}
};
/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
/// Build 'threads' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_threads, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPThreadsClause()
: OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_threads;
}
};
/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
/// Build 'simd' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_simd, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_simd;
}
};
/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
/// Class that represents a component of a mappable expression. E.g.
/// for an expression S.a, the first component is a declaration reference
/// expression associated with 'S' and the second is a member expression
/// associated with the field declaration 'a'. If the expression is an array
/// subscript it may not have any associated declaration. In that case the
/// associated declaration is set to nullptr.
class MappableComponent {
/// Expression associated with the component.
Expr *AssociatedExpression = nullptr;
/// Declaration associated with the component. If the component does
/// not have a declaration (e.g. array subscripts or sections), this is set
/// to nullptr.
ValueDecl *AssociatedDeclaration = nullptr;
public:
explicit MappableComponent() = default;
explicit MappableComponent(Expr *AssociatedExpression,
ValueDecl *AssociatedDeclaration)
: AssociatedExpression(AssociatedExpression),
AssociatedDeclaration(
AssociatedDeclaration
? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
: nullptr) {}
Expr *getAssociatedExpression() const { return AssociatedExpression; }
ValueDecl *getAssociatedDeclaration() const {
return AssociatedDeclaration;
}
};
// List of components of an expression. The first one is the whole
// expression and the last one is the base expression.
using MappableExprComponentList = SmallVector<MappableComponent, 8>;
using MappableExprComponentListRef = ArrayRef<MappableComponent>;
// List of all component lists associated with the same base declaration.
// E.g. if both 'S.a' and 'S.b' are mappable expressions, each will have its
// own component list but share the same base declaration 'S'.
using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;
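// For illustration (the expressions here are hypothetical), the mappable
// expressions 'S.a' and 'S.b' would yield the component lists
//   'S.a' -> { MemberExpr('S.a'), DeclRefExpr('S') }
//   'S.b' -> { MemberExpr('S.b'), DeclRefExpr('S') }
// i.e. the whole expression first and the base expression last, with both
// lists sharing the single base declaration 'S'.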
protected:
// Return the total number of elements in a list of component lists.
static unsigned
getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);
// Return the total number of elements in a list of declarations. All
// declarations are expected to be canonical.
static unsigned
getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};
/// This structure contains all sizes needed by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
/// Number of expressions listed.
unsigned NumVars;
/// Number of unique base declarations.
unsigned NumUniqueDeclarations;
/// Number of component lists.
unsigned NumComponentLists;
/// Total number of expression components.
unsigned NumComponents;
OMPMappableExprListSizeTy() = default;
OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};
/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and
/// 'from' in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
public OMPClauseMappableExprCommon {
friend class OMPClauseReader;
/// Number of unique declarations in this clause.
unsigned NumUniqueDeclarations;
/// Number of component lists in this clause.
unsigned NumComponentLists;
/// Total number of components in this clause.
unsigned NumComponents;
/// C++ nested name specifier for the associated user-defined mapper.
NestedNameSpecifierLoc MapperQualifierLoc;
/// The associated user-defined mapper identifier information.
DeclarationNameInfo MapperIdInfo;
protected:
/// Build a clause for \a NumUniqueDeclarations declarations, \a
/// NumComponentLists total component lists, and \a NumComponents total
/// components.
///
/// \param K Kind of the clause.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
/// \param MapperQualifierLocPtr C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperIdInfoPtr The identifier of associated user-defined mapper.
OMPMappableExprListClause(
OpenMPClauseKind K, const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes,
NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr,
DeclarationNameInfo *MapperIdInfoPtr = nullptr)
: OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc,
Sizes.NumVars),
NumUniqueDeclarations(Sizes.NumUniqueDeclarations),
NumComponentLists(Sizes.NumComponentLists),
NumComponents(Sizes.NumComponents) {
if (MapperQualifierLocPtr)
MapperQualifierLoc = *MapperQualifierLocPtr;
if (MapperIdInfoPtr)
MapperIdInfo = *MapperIdInfoPtr;
}
/// Get the unique declarations that are in the trailing objects of the
/// class.
MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
return MutableArrayRef<ValueDecl *>(
static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// Get the unique declarations that are in the trailing objects of the
/// class.
ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
return ArrayRef<ValueDecl *>(
static_cast<const T *>(this)
->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// Set the unique declarations that are in the trailing objects of the
/// class.
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
assert(UDs.size() == NumUniqueDeclarations &&
"Unexpected amount of unique declarations.");
std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
}
/// Get the number of lists per declaration that are in the trailing
/// objects of the class.
MutableArrayRef<unsigned> getDeclNumListsRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// Get the number of lists per declaration that are in the trailing
/// objects of the class.
ArrayRef<unsigned> getDeclNumListsRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// Set the number of lists per declaration that are in the trailing
/// objects of the class.
void setDeclNumLists(ArrayRef<unsigned> DNLs) {
assert(DNLs.size() == NumUniqueDeclarations &&
"Unexpected amount of list numbers.");
std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
}
/// Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
MutableArrayRef<unsigned> getComponentListSizesRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
ArrayRef<unsigned> getComponentListSizesRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// Set the cumulative component lists sizes that are in the trailing
/// objects of the class.
void setComponentListSizes(ArrayRef<unsigned> CLSs) {
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of component lists.");
std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
}
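// A worked example of the cumulative encoding (the sizes are hypothetical):
// for three component lists with 2, 3, and 1 components, the stored sizes
// are {2, 5, 6}. List I then spans components [Sizes[I-1], Sizes[I]), with
// Sizes[-1] taken to be 0; this is exactly how setClauseInfo() fills the
// storage and how const_component_lists_iterator recovers each list below.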
/// Get the components that are in the trailing objects of the class.
MutableArrayRef<MappableComponent> getComponentsRef() {
return MutableArrayRef<MappableComponent>(
static_cast<T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// Get the components that are in the trailing objects of the class.
ArrayRef<MappableComponent> getComponentsRef() const {
return ArrayRef<MappableComponent>(
static_cast<const T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// Set the components that are in the trailing objects of the class.
/// This requires the list sizes so that it can also fill the original
/// expressions, which are the first component of each list.
void setComponents(ArrayRef<MappableComponent> Components,
ArrayRef<unsigned> CLSs) {
assert(Components.size() == NumComponents &&
"Unexpected amount of component lists.");
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of list sizes.");
std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
}
/// Fill the clause information from the list of declarations and
/// associated component lists.
void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists) {
// Perform some checks to make sure the data sizes are consistent with the
// information available when the clause was created.
assert(getUniqueDeclarationsTotalNumber(Declarations) ==
NumUniqueDeclarations &&
"Unexpected number of mappable expression info entries!");
assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
"Unexpected total number of components!");
assert(Declarations.size() == ComponentLists.size() &&
"Declaration and component lists size is not consistent!");
assert(Declarations.size() == NumComponentLists &&
"Unexpected declaration and component lists size!");
// Organize the components by declaration and retrieve the original
// expression. Original expressions are always the first component of the
// mappable component list.
llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
ComponentListMap;
{
auto CI = ComponentLists.begin();
for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
++DI, ++CI) {
assert(!CI->empty() && "Invalid component list!");
ComponentListMap[*DI].push_back(*CI);
}
}
// Iterators of the target storage.
auto UniqueDeclarations = getUniqueDeclsRef();
auto UDI = UniqueDeclarations.begin();
auto DeclNumLists = getDeclNumListsRef();
auto DNLI = DeclNumLists.begin();
auto ComponentListSizes = getComponentListSizesRef();
auto CLSI = ComponentListSizes.begin();
auto Components = getComponentsRef();
auto CI = Components.begin();
// Variable to compute the accumulation of the number of components.
unsigned PrevSize = 0u;
// Scan all the declarations and associated component lists.
for (auto &M : ComponentListMap) {
// The declaration.
auto *D = M.first;
// The component lists.
auto CL = M.second;
// Initialize the entry.
*UDI = D;
++UDI;
*DNLI = CL.size();
++DNLI;
// Obtain the cumulative sizes and concatenate all the components in the
// reserved storage.
for (auto C : CL) {
// Accumulate with the previous size.
PrevSize += C.size();
// Save the size.
*CLSI = PrevSize;
++CLSI;
// Append components after the current components iterator.
CI = std::copy(C.begin(), C.end(), CI);
}
}
}
/// Set the nested name specifier of associated user-defined mapper.
void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
MapperQualifierLoc = NNSL;
}
/// Set the name of associated user-defined mapper.
void setMapperIdInfo(DeclarationNameInfo MapperId) {
MapperIdInfo = MapperId;
}
/// Get the user-defined mapper references that are in the trailing objects of
/// the class.
MutableArrayRef<Expr *> getUDMapperRefs() {
return llvm::makeMutableArrayRef<Expr *>(
static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
OMPVarListClause<T>::varlist_size(),
OMPVarListClause<T>::varlist_size());
}
/// Get the user-defined mapper references that are in the trailing objects
/// of the class.
ArrayRef<Expr *> getUDMapperRefs() const {
return llvm::makeArrayRef<Expr *>(
static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
OMPVarListClause<T>::varlist_size(),
OMPVarListClause<T>::varlist_size());
}
/// Set the user-defined mappers that are in the trailing objects of the
/// class.
void setUDMapperRefs(ArrayRef<Expr *> DMDs) {
assert(DMDs.size() == OMPVarListClause<T>::varlist_size() &&
"Unexpected number of user-defined mappers.");
std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin());
}
public:
/// Return the number of unique base declarations in this clause.
unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }
/// Return the number of lists derived from the clause expressions.
unsigned getTotalComponentListNum() const { return NumComponentLists; }
/// Return the total number of components in all lists derived from the
/// clause.
unsigned getTotalComponentsNum() const { return NumComponents; }
/// Gets the nested name specifier for associated user-defined mapper.
NestedNameSpecifierLoc getMapperQualifierLoc() const {
return MapperQualifierLoc;
}
/// Gets the name info for associated user-defined mapper.
const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }
/// Iterator that browses the components list by list. It also allows
/// browsing the components of a single declaration.
class const_component_lists_iterator
: public llvm::iterator_adaptor_base<
const_component_lists_iterator,
MappableExprComponentListRef::const_iterator,
std::forward_iterator_tag, MappableComponent, ptrdiff_t,
MappableComponent, MappableComponent> {
// The declaration the iterator currently refers to.
ArrayRef<ValueDecl *>::iterator DeclCur;
// The list number associated with the current declaration.
ArrayRef<unsigned>::iterator NumListsCur;
// Remaining lists for the current declaration.
unsigned RemainingLists = 0;
// The cumulative size of the previous list, or zero if there is no previous
// list.
unsigned PrevListSize = 0;
// The cumulative sizes of the current list - it will delimit the remaining
// range of interest.
ArrayRef<unsigned>::const_iterator ListSizeCur;
ArrayRef<unsigned>::const_iterator ListSizeEnd;
// Iterator to the end of the components storage.
MappableExprComponentListRef::const_iterator End;
public:
/// Construct an iterator that scans all lists.
explicit const_component_lists_iterator(
ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components)
: const_component_lists_iterator::iterator_adaptor_base(
Components.begin()),
DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
ListSizeCur(CumulativeListSizes.begin()),
ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
assert(UniqueDecls.size() == DeclsListNum.size() &&
"Inconsistent number of declarations and list sizes!");
if (!DeclsListNum.empty())
RemainingLists = *NumListsCur;
}
/// Construct an iterator that scans lists for a given declaration \a
/// Declaration.
explicit const_component_lists_iterator(
const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components)
: const_component_lists_iterator(UniqueDecls, DeclsListNum,
CumulativeListSizes, Components) {
// Look for the desired declaration. While we are looking for it, we
// update the state so that we know the component where a given list
// starts.
for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
if (*DeclCur == Declaration)
break;
assert(*NumListsCur > 0 && "No lists associated with declaration??");
// Skip the lists associated with the current declaration, but save the
// last list size that was skipped.
std::advance(ListSizeCur, *NumListsCur - 1);
PrevListSize = *ListSizeCur;
++ListSizeCur;
}
// If we didn't find any declaration, advance the iterator to after the
// last component and set remaining lists to zero.
if (ListSizeCur == CumulativeListSizes.end()) {
this->I = End;
RemainingLists = 0u;
return;
}
// Set the remaining lists with the total number of lists of the current
// declaration.
RemainingLists = *NumListsCur;
// Adjust the list size end iterator to the end of the relevant range.
ListSizeEnd = ListSizeCur;
std::advance(ListSizeEnd, RemainingLists);
// Given that the list sizes are cumulative, the index of the component
// that starts the list is the size of the previous list.
std::advance(this->I, PrevListSize);
}
// Return the array with the current list. The sizes are cumulative, so the
// array size is the difference between the current size and previous one.
std::pair<const ValueDecl *, MappableExprComponentListRef>
operator*() const {
assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
return std::make_pair(
*DeclCur,
MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
}
std::pair<const ValueDecl *, MappableExprComponentListRef>
operator->() const {
return **this;
}
// Skip the components of the current list.
const_component_lists_iterator &operator++() {
assert(ListSizeCur != ListSizeEnd && RemainingLists &&
"Invalid iterator!");
// If we don't have more lists just skip all the components. Otherwise,
// advance the iterator by the number of components in the current list.
if (std::next(ListSizeCur) == ListSizeEnd) {
this->I = End;
RemainingLists = 0;
} else {
std::advance(this->I, *ListSizeCur - PrevListSize);
PrevListSize = *ListSizeCur;
// We are done with a declaration, move to the next one.
if (!(--RemainingLists)) {
++DeclCur;
++NumListsCur;
RemainingLists = *NumListsCur;
assert(RemainingLists && "No lists in the following declaration??");
}
}
++ListSizeCur;
return *this;
}
};
using const_component_lists_range =
llvm::iterator_range<const_component_lists_iterator>;
/// Iterators for all component lists.
const_component_lists_iterator component_lists_begin() const {
return const_component_lists_iterator(
getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
getComponentsRef());
}
const_component_lists_iterator component_lists_end() const {
return const_component_lists_iterator(
ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
MappableExprComponentListRef(getComponentsRef().end(),
getComponentsRef().end()));
}
const_component_lists_range component_lists() const {
return {component_lists_begin(), component_lists_end()};
}
/// Iterators for component lists associated with the provided
/// declaration.
const_component_lists_iterator
decl_component_lists_begin(const ValueDecl *VD) const {
return const_component_lists_iterator(
VD, getUniqueDeclsRef(), getDeclNumListsRef(),
getComponentListSizesRef(), getComponentsRef());
}
const_component_lists_iterator decl_component_lists_end() const {
return component_lists_end();
}
const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
return {decl_component_lists_begin(VD), decl_component_lists_end()};
}
/// Iterators to access all the declarations, number of lists, list sizes, and
/// components.
using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;
const_all_decls_range all_decls() const {
auto A = getUniqueDeclsRef();
return const_all_decls_range(A.begin(), A.end());
}
using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
using const_all_num_lists_range =
llvm::iterator_range<const_all_num_lists_iterator>;
const_all_num_lists_range all_num_lists() const {
auto A = getDeclNumListsRef();
return const_all_num_lists_range(A.begin(), A.end());
}
using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
using const_all_lists_sizes_range =
llvm::iterator_range<const_all_lists_sizes_iterator>;
const_all_lists_sizes_range all_lists_sizes() const {
auto A = getComponentListSizesRef();
return const_all_lists_sizes_range(A.begin(), A.end());
}
using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
using const_all_components_range =
llvm::iterator_range<const_all_components_iterator>;
const_all_components_range all_components() const {
auto A = getComponentsRef();
return const_all_components_range(A.begin(), A.end());
}
using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
using mapperlist_const_range =
llvm::iterator_range<mapperlist_const_iterator>;
mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
mapperlist_const_iterator mapperlist_begin() const {
return getUDMapperRefs().begin();
}
mapperlist_const_iterator mapperlist_end() const {
return getUDMapperRefs().end();
}
mapperlist_range mapperlists() {
return mapperlist_range(mapperlist_begin(), mapperlist_end());
}
mapperlist_const_range mapperlists() const {
return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
}
};
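// A minimal usage sketch (the variable 'MapCl' is hypothetical): walking the
// per-declaration component lists of a mappable clause such as OMPMapClause.
//
//   for (auto Pair : MapCl->component_lists()) {
//     const ValueDecl *BaseDecl = Pair.first;  // unique base declaration
//     MappableExprComponentListRef Components = Pair.second; // whole expr 1st
//   }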
/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
private llvm::TrailingObjects<
OMPMapClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
// There are varlist_size() of expressions, and varlist_size() of
// user-defined mappers.
return 2 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
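// For example (with hypothetical sizes), given 4 listed expressions, 2
// unique declarations, 3 component lists, and 7 components, the trailing
// storage holds: 8 Expr* (4 variables plus 4 user-defined mapper
// references), 2 ValueDecl*, 5 unsigned (2 per-declaration list counts
// followed by 3 cumulative list sizes), and finally 7 MappableComponents.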
public:
/// Number of allowed map-type-modifiers.
static constexpr unsigned NumberOfModifiers =
OMPC_MAP_MODIFIER_last - OMPC_MAP_MODIFIER_unknown - 1;
private:
/// Map-type-modifiers for the 'map' clause.
OpenMPMapModifierKind MapTypeModifiers[NumberOfModifiers] = {
OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
OMPC_MAP_MODIFIER_unknown};
/// Location of map-type-modifiers for the 'map' clause.
SourceLocation MapTypeModifiersLoc[NumberOfModifiers];
/// Map type for the 'map' clause.
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
/// Is this an implicit map type or not.
bool MapTypeIsImplicit = false;
/// Location of the map type.
SourceLocation MapLoc;
/// Colon location.
SourceLocation ColonLoc;
/// Build a clause for \a NumVars listed expressions, \a
/// NumUniqueDeclarations declarations, \a NumComponentLists total component
/// lists, and \a NumComponents total expression components.
///
/// \param MapModifiers Map-type-modifiers.
/// \param MapModifiersLoc Locations of map-type-modifiers.
/// \param MapperQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperIdInfo The identifier of associated user-defined mapper.
/// \param MapType Map type.
/// \param MapTypeIsImplicit Map type is inferred implicitly.
/// \param MapLoc Location of the map type.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<SourceLocation> MapModifiersLoc,
NestedNameSpecifierLoc MapperQualifierLoc,
DeclarationNameInfo MapperIdInfo,
OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
SourceLocation MapLoc, const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_map, Locs, Sizes, &MapperQualifierLoc,
&MapperIdInfo),
MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
"Unexpected number of map type modifiers.");
llvm::copy(MapModifiers, std::begin(MapTypeModifiers));
assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
MapModifiersLoc.size() &&
"Unexpected number of map type modifier locations.");
llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_map, OMPVarListLocTy(), Sizes) {}
/// Set map-type-modifier for the clause.
///
/// \param I index for map-type-modifier.
/// \param T map-type-modifier for the clause.
void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
assert(I < NumberOfModifiers &&
"Unexpected index to store map type modifier, exceeds array size.");
MapTypeModifiers[I] = T;
}
/// Set location for the map-type-modifier.
///
/// \param I index for map-type-modifier location.
/// \param TLoc map-type-modifier location.
void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
assert(I < NumberOfModifiers &&
"Index to store map type modifier location exceeds array size.");
MapTypeModifiersLoc[I] = TLoc;
}
/// Set type for the clause.
///
/// \param T Type for the clause.
void setMapType(OpenMPMapClauseKind T) { MapType = T; }
/// Set type location.
///
/// \param TLoc Type location.
void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param UDMapperRefs References to user-defined mappers associated with
/// expressions used in the clause.
/// \param MapModifiers Map-type-modifiers.
/// \param MapModifiersLoc Location of map-type-modifiers.
/// \param UDMQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperId The identifier of associated user-defined mapper.
/// \param Type Map type.
/// \param TypeIsImplicit Map type is inferred implicitly.
/// \param TypeLoc Location of the map type.
static OMPMapClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
ArrayRef<Expr *> UDMapperRefs,
ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<SourceLocation> MapModifiersLoc,
NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);
/// Creates an empty clause with the place for \a NumVars original
/// expressions, \a NumUniqueDeclarations declarations, \a NumComponentLists
/// lists, and \a NumComponents expression components.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPMapClause *CreateEmpty(const ASTContext &C,
const OMPMappableExprListSizeTy &Sizes);
/// Fetches mapping kind for the clause.
OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }
/// Is this an implicit map type?
/// We have to capture 'IsMapTypeImplicit' from the parser for more
/// informative error messages. It helps distinguish map(r) from
/// map(tofrom: r), which is important to print more helpful error
/// messages for some target directives.
bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }
/// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
///
/// \param Cnt index for map-type-modifier.
OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
assert(Cnt < NumberOfModifiers &&
"Requested modifier exceeds the total number of modifiers.");
return MapTypeModifiers[Cnt];
}
/// Fetches the map-type-modifier location at 'Cnt' index of array of
/// modifiers' locations.
///
/// \param Cnt index for map-type-modifier location.
SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
assert(Cnt < NumberOfModifiers &&
"Requested modifier location exceeds total number of modifiers.");
return MapTypeModifiersLoc[Cnt];
}
/// Fetches ArrayRef of map-type-modifiers.
ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
return llvm::makeArrayRef(MapTypeModifiers);
}
/// Fetches ArrayRef of location of map-type-modifiers.
ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
return llvm::makeArrayRef(MapTypeModifiersLoc);
}
/// Fetches location of clause mapping kind.
SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
child_range children() {
return child_range(
reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPMapClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
auto Children = const_cast<OMPMapClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_map;
}
};
/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// NumTeams number.
Stmt *NumTeams = nullptr;
/// Set the NumTeams number.
///
/// \param E NumTeams number.
void setNumTeams(Expr *E) { NumTeams = E; }
public:
/// Build 'num_teams' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), NumTeams(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPNumTeamsClause()
: OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return NumTeams number.
Expr *getNumTeams() { return cast<Expr>(NumTeams); }
/// Return NumTeams number.
Expr *getNumTeams() const { return cast<Expr>(NumTeams); }
child_range children() { return child_range(&NumTeams, &NumTeams + 1); }
const_child_range children() const {
return const_child_range(&NumTeams, &NumTeams + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_num_teams;
}
};
/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// ThreadLimit number.
Stmt *ThreadLimit = nullptr;
/// Set the ThreadLimit number.
///
/// \param E ThreadLimit number.
void setThreadLimit(Expr *E) { ThreadLimit = E; }
public:
/// Build 'thread_limit' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPThreadLimitClause(Expr *E, Stmt *HelperE,
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPThreadLimitClause()
: OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return ThreadLimit number.
Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }
/// Return ThreadLimit number.
Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }
child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); }
const_child_range children() const {
return const_child_range(&ThreadLimit, &ThreadLimit + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_thread_limit;
}
};
/// This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Priority number.
Stmt *Priority = nullptr;
/// Set the Priority number.
///
/// \param E Priority number.
void setPriority(Expr *E) { Priority = E; }
public:
/// Build 'priority' clause.
///
/// \param Priority Expression associated with this clause.
/// \param HelperPriority Helper priority for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_priority, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), Priority(Priority) {
setPreInitStmt(HelperPriority, CaptureRegion);
}
/// Build an empty clause.
OMPPriorityClause()
: OMPClause(OMPC_priority, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return Priority number.
Expr *getPriority() { return cast<Expr>(Priority); }
/// Return Priority number.
Expr *getPriority() const { return cast<Expr>(Priority); }
child_range children() { return child_range(&Priority, &Priority + 1); }
const_child_range children() const {
return const_child_range(&Priority, &Priority + 1);
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_priority;
}
};
/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Grainsize expression of the 'grainsize' clause.
Stmt *Grainsize = nullptr;
/// Set grainsize.
void setGrainsize(Expr *Size) { Grainsize = Size; }
public:
/// Build 'grainsize' clause.
///
/// \param Size Expression associated with this clause.
/// \param HelperSize Helper grainsize for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_grainsize, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), Grainsize(Size) {
setPreInitStmt(HelperSize, CaptureRegion);
}
/// Build an empty clause.
explicit OMPGrainsizeClause()
: OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the grainsize expression.
Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }
child_range children() { return child_range(&Grainsize, &Grainsize + 1); }
const_child_range children() const {
return const_child_range(&Grainsize, &Grainsize + 1);
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_grainsize;
}
};
/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
/// Build 'nogroup' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPNogroupClause()
: OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_nogroup;
}
};
/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Number of tasks expression of the 'num_tasks' clause.
Stmt *NumTasks = nullptr;
/// Set the number of tasks expression.
void setNumTasks(Expr *Size) { NumTasks = Size; }
public:
/// Build 'num_tasks' clause.
///
/// \param Size Expression associated with this clause.
/// \param HelperSize Helper num_tasks expression for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_num_tasks, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), NumTasks(Size) {
setPreInitStmt(HelperSize, CaptureRegion);
}
/// Build an empty clause.
explicit OMPNumTasksClause()
: OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the number of tasks expression.
Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }
child_range children() { return child_range(&NumTasks, &NumTasks + 1); }
const_child_range children() const {
return const_child_range(&NumTasks, &NumTasks + 1);
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_num_tasks;
}
};
/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Hint expression of the 'hint' clause.
Stmt *Hint = nullptr;
/// Set hint expression.
void setHint(Expr *H) { Hint = H; }
public:
/// Build 'hint' clause with expression \a Hint.
///
/// \param Hint Hint expression.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
Hint(Hint) {}
/// Build an empty clause.
OMPHintClause() : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns the hint expression.
Expr *getHint() const { return cast_or_null<Expr>(Hint); }
child_range children() { return child_range(&Hint, &Hint + 1); }
const_child_range children() const {
return const_child_range(&Hint, &Hint + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_hint;
}
};
/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// A kind of the 'dist_schedule' clause.
OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;
/// Start location of the schedule kind in source code.
SourceLocation KindLoc;
/// Location of ',' (if any).
SourceLocation CommaLoc;
/// Chunk size.
Expr *ChunkSize = nullptr;
/// Set schedule kind.
///
/// \param K Schedule kind.
void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }
/// Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Set schedule kind start location.
///
/// \param KLoc Schedule kind location.
void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
/// Set location of ','.
///
/// \param Loc Location of ','.
void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
/// Set chunk size.
///
/// \param E Chunk size.
void setChunkSize(Expr *E) { ChunkSize = E; }
public:
/// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
/// size expression \a ChunkSize.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param CommaLoc Location of ','.
/// \param EndLoc Ending location of the clause.
/// \param Kind DistSchedule kind.
/// \param ChunkSize Chunk size.
/// \param HelperChunkSize Helper chunk size for combined directives.
OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation KLoc, SourceLocation CommaLoc,
SourceLocation EndLoc,
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
Stmt *HelperChunkSize)
: OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
setPreInitStmt(HelperChunkSize);
}
/// Build an empty clause.
explicit OMPDistScheduleClause()
: OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Get kind of the clause.
OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }
/// Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }
/// Get kind location.
SourceLocation getDistScheduleKindLoc() { return KindLoc; }
/// Get location of ','.
SourceLocation getCommaLoc() { return CommaLoc; }
/// Get chunk size.
Expr *getChunkSize() { return ChunkSize; }
/// Get chunk size.
const Expr *getChunkSize() const { return ChunkSize; }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
reinterpret_cast<Stmt **>(&ChunkSize) + 1);
}
const_child_range children() const {
auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_dist_schedule;
}
};
/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of kind
/// 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Modifiers for 'defaultmap' clause.
OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;
/// Locations of modifiers.
SourceLocation ModifierLoc;
/// A kind of the 'defaultmap' clause.
OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;
/// Start location of the defaultmap kind in source code.
SourceLocation KindLoc;
/// Set defaultmap kind.
///
/// \param K Defaultmap kind.
void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }
/// Set the defaultmap modifier.
///
/// \param M Defaultmap modifier.
void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
Modifier = M;
}
/// Set location of the defaultmap modifier.
void setDefaultmapModifierLoc(SourceLocation Loc) {
ModifierLoc = Loc;
}
/// Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Set defaultmap kind start location.
///
/// \param KLoc Defaultmap kind location.
void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
public:
/// Build 'defaultmap' clause with defaultmap kind \a Kind.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param EndLoc Ending location of the clause.
/// \param Kind Defaultmap kind.
/// \param M The modifier applied to 'defaultmap' clause.
/// \param MLoc Location of the modifier.
OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation MLoc, SourceLocation KLoc,
SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
OpenMPDefaultmapClauseModifier M)
: OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}
/// Build an empty clause.
explicit OMPDefaultmapClause()
: OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}
/// Get kind of the clause.
OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }
/// Get the modifier of the clause.
OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
return Modifier;
}
/// Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }
/// Get kind location.
SourceLocation getDefaultmapKindLoc() { return KindLoc; }
/// Get the modifier location.
SourceLocation getDefaultmapModifierLoc() const {
return ModifierLoc;
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_defaultmap;
}
};
/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
private llvm::TrailingObjects<
OMPToClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param MapperQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperIdInfo The identifier of associated user-defined mapper.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPToClause(NestedNameSpecifierLoc MapperQualifierLoc,
DeclarationNameInfo MapperIdInfo,
const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_to, Locs, Sizes, &MapperQualifierLoc,
&MapperIdInfo) {}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_to, OMPVarListLocTy(), Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
// There are varlist_size() of expressions, and varlist_size() of
// user-defined mappers.
return 2 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param UDMapperRefs References to user-defined mappers associated with
/// expressions used in the clause.
/// \param UDMQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperId The identifier of associated user-defined mapper.
static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
ArrayRef<Expr *> UDMapperRefs,
NestedNameSpecifierLoc UDMQualifierLoc,
DeclarationNameInfo MapperId);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPToClause *CreateEmpty(const ASTContext &C,
const OMPMappableExprListSizeTy &Sizes);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPToClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_to;
}
};
/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
: public OMPMappableExprListClause<OMPFromClause>,
private llvm::TrailingObjects<
OMPFromClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param MapperQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperIdInfo The identifier of associated user-defined mapper.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPFromClause(NestedNameSpecifierLoc MapperQualifierLoc,
DeclarationNameInfo MapperIdInfo,
const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_from, Locs, Sizes, &MapperQualifierLoc,
&MapperIdInfo) {}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_from, OMPVarListLocTy(), Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
// There are varlist_size() of expressions, and varlist_size() of
// user-defined mappers.
return 2 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param UDMapperRefs References to user-defined mappers associated with
/// expressions used in the clause.
/// \param UDMQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperId The identifier of associated user-defined mapper.
static OMPFromClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
ArrayRef<Expr *> UDMapperRefs,
NestedNameSpecifierLoc UDMQualifierLoc,
DeclarationNameInfo MapperId);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPFromClause *CreateEmpty(const ASTContext &C,
const OMPMappableExprListSizeTy &Sizes);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPFromClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_from;
}
};
/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
: public OMPMappableExprListClause<OMPUseDevicePtrClause>,
private llvm::TrailingObjects<
OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_use_device_ptr, Locs, Sizes) {}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_use_device_ptr, OMPVarListLocTy(),
Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return 3 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
/// Sets the list of references to private copies with initializers for new
/// private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for new
/// private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new private
/// variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// Gets the list of references to initializer variables for new private
/// variables.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param PrivateVars Expressions referring to private copies.
/// \param Inits Expressions referring to private copy initializers.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPUseDevicePtrClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPUseDevicePtrClause *
CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_use_device_ptr;
}
};
/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
: public OMPMappableExprListClause<OMPIsDevicePtrClause>,
private llvm::TrailingObjects<
OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_is_device_ptr, Locs, Sizes) {}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_is_device_ptr, OMPVarListLocTy(),
Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPIsDevicePtrClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPIsDevicePtrClause *
CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_is_device_ptr;
}
};
/// This class implements a simple visitor for OMPClause
/// subclasses.
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
#define PTR(CLASS) typename Ptr<CLASS>::type
#define DISPATCH(CLASS) \
return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))
#define OPENMP_CLAUSE(Name, Class) \
RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); }
#include "clang/Basic/OpenMPKinds.def"
RetTy Visit(PTR(OMPClause) S) {
// Top switch clause: visit each OMPClause.
switch (S->getClauseKind()) {
default: llvm_unreachable("Unknown clause kind!");
#define OPENMP_CLAUSE(Name, Class) \
case OMPC_ ## Name : return Visit ## Class(static_cast<PTR(Class)>(S));
#include "clang/Basic/OpenMPKinds.def"
}
}
// Base case, ignore it. :)
RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
#undef PTR
#undef DISPATCH
};
template <typename T>
using const_ptr = typename std::add_pointer<typename std::add_const<T>::type>;
template<class ImplClass, typename RetTy = void>
class OMPClauseVisitor :
public OMPClauseVisitorBase <ImplClass, std::add_pointer, RetTy> {};
template<class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor :
public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {};
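// A minimal usage sketch (hypothetical visitor, not part of this header): a
// subclass overrides the Visit##Class methods it cares about, and every other
// clause kind falls through to the VisitOMPClause base case.
//
//   class NowaitCounter final
//       : public ConstOMPClauseVisitor<NowaitCounter, unsigned> {
//   public:
//     unsigned VisitOMPNowaitClause(const OMPNowaitClause *) { return 1; }
//   };
//
// NowaitCounter().Visit(C) returns 1 for a 'nowait' clause and 0 (the default
// from VisitOMPClause) for anything else.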
class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
raw_ostream &OS;
const PrintingPolicy &Policy;
/// Process clauses with list of variables.
template <typename T> void VisitOMPClauseList(T *Node, char StartSym);
public:
OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
: OS(OS), Policy(Policy) {}
#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
};
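// Example use of the printer (a sketch; in-tree it is normally driven by the
// statement printer): construct it over a raw_ostream with a PrintingPolicy,
// then dispatch on a clause. 'Policy' and 'Clause' stand in for the caller's
// objects.
//
//   std::string Buf;
//   llvm::raw_string_ostream OS(Buf);
//   OMPClausePrinter(OS, Policy).Visit(Clause);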
} // namespace clang
#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
|
GB_unop__identity_uint8_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_fc32)
// op(A') function: GB (_unop_tran__identity_uint8_fc32)
// C type: uint8_t
// A type: GxB_FC32_t
// cast: uint8_t cij = GB_cast_to_uint8_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = GB_cast_to_uint8_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = GB_cast_to_uint8_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint8_fc32)
(
uint8_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint8_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
set_num_threads.c | #include <stdio.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
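/*
 * Minimal OpenMP smoke test: request 7 threads, count how many enter the
 * parallel region, and assert on the count. The OpenMP calls are guarded so
 * the program also builds and passes when compiled serially. It assumes
 * dynamic thread adjustment is off (the usual default). A typical build
 * line, as an example: cc -fopenmp set_num_threads.c
 */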
int main(void)
{
int counter = 0, nthreads = 1;
#ifdef _OPENMP
omp_set_num_threads(7);
#endif
#pragma omp parallel
{
#pragma omp critical
counter++;
#ifdef _OPENMP
/* Guard the call: omp_get_num_threads() is only declared when <omp.h> is
   included, and '#pragma omp single' avoids a racy concurrent store. */
#pragma omp single
nthreads = omp_get_num_threads();
#endif
}
printf("number of threads is: %d\n", nthreads);
#ifdef _OPENMP
assert(counter == 7);
#else
assert(counter == 1);
#endif
return 0;
}
|
nn_index.h | /***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#ifndef RTABMAP_FLANN_NNINDEX_H
#define RTABMAP_FLANN_NNINDEX_H
#include <vector>
#include <fstream>
#include "rtflann/general.h"
#include "rtflann/util/matrix.h"
#include "rtflann/util/params.h"
#include "rtflann/util/result_set.h"
#include "rtflann/util/dynamic_bitset.h"
#include "rtflann/util/saving.h"
#include "rtflann/util/allocator.h"
namespace rtflann
{
#define KNN_HEAP_THRESHOLD 250
class IndexBase
{
public:
virtual ~IndexBase() {}
virtual size_t veclen() const = 0;
virtual size_t size() const = 0;
virtual flann_algorithm_t getType() const = 0;
virtual int usedMemory() const = 0;
virtual IndexParams getParameters() const = 0;
virtual void loadIndex(FILE* stream) = 0;
virtual void saveIndex(FILE* stream) = 0;
virtual void debug_index() {}
};
/**
* Nearest-neighbour index base class
*/
template <typename Distance>
class NNIndex : public IndexBase
{
public:
typedef typename Distance::ElementType ElementType;
typedef typename Distance::ResultType DistanceType;
virtual void debug_index() {}
virtual void set_cached(int cache) {}
virtual void save_index(std::ofstream *outfile) {}
virtual void load_index(std::ifstream *infile, char *data_ptr) {}
NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
removed_(false), removed_count_(0), data_ptr_(NULL)
{
}
NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL)
{
}
NNIndex(const NNIndex& other) :
distance_(other.distance_),
last_id_(other.last_id_),
size_(other.size_),
size_at_build_(other.size_at_build_),
veclen_(other.veclen_),
index_params_(other.index_params_),
removed_(other.removed_),
removed_points_(other.removed_points_),
removed_count_(other.removed_count_),
ids_(other.ids_),
points_(other.points_),
data_ptr_(NULL)
{
if (other.data_ptr_) {
std::cout << "creating new index: " << size_ << "," << veclen_ << std::endl;
data_ptr_ = new ElementType[size_*veclen_];
std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_);
for (size_t i=0;i<size_;++i) {
points_[i] = data_ptr_ + i*veclen_;
}
}
}
virtual ~NNIndex()
{
if (data_ptr_) {
delete[] data_ptr_;
}
}
virtual NNIndex* clone() const = 0;
/**
* Builds the index
*/
virtual void buildIndex()
{
std::cout << "-----running buildIndex()------------" << std::endl;
freeIndex();
cleanRemovedPoints();
// building index
buildIndexImpl();
size_at_build_ = size_;
}
/**
* Builds the index using the specified dataset
* @param dataset the dataset to use
*/
virtual void buildIndex(const Matrix<ElementType>& dataset)
{
setDataset(dataset);
this->buildIndex();
}
/**
* @brief Incrementally add points to the index.
* @param points Matrix with points to be added
* @param rebuild_threshold
*/
virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
{
throw FLANNException("Functionality not supported by this index");
}
/**
* Remove point from the index
* @param index Index of point to be removed
*/
virtual void removePoint(size_t id)
{
if (!removed_) {
ids_.resize(size_);
for (size_t i=0;i<size_;++i) {
ids_[i] = i;
}
removed_points_.resize(size_);
removed_points_.reset();
last_id_ = size_;
removed_ = true;
}
size_t point_index = id_to_index(id);
if (point_index!=size_t(-1) && !removed_points_.test(point_index)) {
removed_points_.set(point_index);
removed_count_++;
}
}
/**
* Get point with specific id
* @param id
* @return
*/
virtual ElementType* getPoint(size_t id)
{
size_t index = id_to_index(id);
if (index!=size_t(-1)) {
return points_[index];
}
else {
return NULL;
}
}
/**
* @return number of features in this index.
*/
inline size_t size() const
{
return size_ - removed_count_;
}
inline size_t removedCount() const
{
return removed_count_;
}
inline size_t sizeAtBuild() const
{
return size_at_build_;
}
/**
* @return The dimensionality of the features in this index.
*/
inline size_t veclen() const
{
return veclen_;
}
/**
* Returns the parameters used by the index.
*
* @return The index parameters
*/
IndexParams getParameters() const
{
return index_params_;
}
template<typename Archive>
void serialize(Archive& ar)
{
IndexHeader header;
if (Archive::is_saving::value) {
header.h.data_type = flann_datatype_value<ElementType>::value;
header.h.index_type = getType();
header.h.rows = size_;
header.h.cols = veclen_;
}
ar & header;
// sanity checks
if (Archive::is_loading::value) {
if (strncmp(header.h.signature,
FLANN_SIGNATURE_,
strlen(FLANN_SIGNATURE_) - strlen("v0.0")) != 0) {
throw FLANNException("Invalid index file, wrong signature");
}
if (header.h.data_type != flann_datatype_value<ElementType>::value) {
throw FLANNException("Datatype of saved index is different than of the one to be created.");
}
if (header.h.index_type != getType()) {
throw FLANNException("Saved index type is different then the current index type.");
}
// TODO: check for distance type
}
ar & size_;
ar & veclen_;
ar & size_at_build_;
bool save_dataset;
if (Archive::is_saving::value) {
save_dataset = get_param(index_params_,"save_dataset", false);
}
ar & save_dataset;
if (save_dataset) {
if (Archive::is_loading::value) {
if (data_ptr_) {
delete[] data_ptr_;
}
data_ptr_ = new ElementType[size_*veclen_];
points_.resize(size_);
for (size_t i=0;i<size_;++i) {
points_[i] = data_ptr_ + i*veclen_;
}
}
for (size_t i=0;i<size_;++i) {
ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType));
}
} else {
if (points_.size()!=size_) {
throw FLANNException("Saved index does not contain the dataset and no dataset was provided.");
}
}
ar & last_id_;
ar & ids_;
ar & removed_;
if (removed_) {
ar & removed_points_;
}
ar & removed_count_;
}
/**
* @brief Perform k-nearest neighbor search
* @param[in] queries The query points for which to find the nearest neighbors
* @param[out] indices The indices of the nearest neighbors found
* @param[out] dists Distances to the nearest neighbors found
* @param[in] knn Number of nearest neighbors to return
* @param[in] params Search parameters
*/
virtual int knnSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen());
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows);
assert(indices.cols >= knn);
assert(dists.cols >= knn);
bool use_heap;
if (params.use_heap == FLANN_Undefined) {
use_heap = (knn > KNN_HEAP_THRESHOLD);
}
else {
use_heap = (params.use_heap == FLANN_True);
}
int count = 0;
if (use_heap) {
#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
else {
#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
return count;
}
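/**
 * A knnSearch usage sketch (hypothetical buffers; assumes 'index' was built
 * over float data and 'queries' holds nq rows of matching dimensionality):
 *
 *   Matrix<size_t> indices(new size_t[nq*knn], nq, knn);
 *   Matrix<DistanceType> dists(new DistanceType[nq*knn], nq, knn);
 *   index.knnSearch(queries, indices, dists, knn, SearchParams(128));
 *   // ... consume results ...
 *   delete[] indices.ptr(); delete[] dists.ptr();
 *
 * The caller owns the output buffers; SearchParams(128) sets the number of
 * checks, as in the FLANN API.
 */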
/**
*
* @param queries
* @param indices
* @param dists
* @param knn
* @param params
* @return
*/
/*int knnSearch(const Matrix<ElementType>& queries,
Matrix<int>& indices,
Matrix<DistanceType>& dists,
size_t knn,
const SearchParams& params) const
{
rtflann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);
int result = knnSearch(queries, indices_, dists, knn, params);
for (size_t i=0;i<indices.rows;++i) {
for (size_t j=0;j<indices.cols;++j) {
indices[i][j] = indices_[i][j];
}
}
delete[] indices_.ptr();
return result;
}*/
/**
* @brief Perform k-nearest neighbor search
* @param[in] queries The query points for which to find the nearest neighbors
* @param[out] indices The indices of the nearest neighbors found
* @param[out] dists Distances to the nearest neighbors found
* @param[in] knn Number of nearest neighbors to return
* @param[in] params Search parameters
*/
virtual int knnSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen());
bool use_heap;
if (params.use_heap == FLANN_Undefined) {
use_heap = (knn > KNN_HEAP_THRESHOLD);
}
else {
use_heap = (params.use_heap == FLANN_True);
}
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
int count = 0;
if (use_heap) {
#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
else {
#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
return count;
}
/**
*
* @param queries
* @param indices
* @param dists
* @param knn
* @param params
* @return
*/
int knnSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists,
size_t knn,
const SearchParams& params) const
{
std::vector<std::vector<size_t> > indices_;
int result = knnSearch(queries, indices_, dists, knn, params);
indices.resize(indices_.size());
for (size_t i=0;i<indices_.size();++i) {
indices[i].assign(indices_[i].begin(), indices_[i].end());
}
return result;
}
/**
* @brief Perform radius search
* @param[in] queries The query points
* @param[out] indices The indices of the neighbors found within the given radius
* @param[out] dists The distances to the nearest neighbors found
* @param[in] radius The radius used for search
* @param[in] params Search parameters
* @return Number of neighbors found
*/
virtual int radiusSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
size_t num_neighbors = std::min(indices.cols, dists.cols);
int max_neighbors = params.max_neighbors;
if (max_neighbors<0) max_neighbors = num_neighbors;
else max_neighbors = std::min(max_neighbors,(int)num_neighbors);
if (max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
count += resultSet.size();
}
}
}
else {
// explicitly indicated to use unbounded radius result set
// and we know there'll be enough room for resulting indices and dists
if (params.max_neighbors<0 && (num_neighbors>=size())) {
#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if (n>num_neighbors) n = num_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
else {
// number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if ((int)n>max_neighbors) n = max_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
}
return count;
}
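/**
 * Reading the fixed-size output rows filled above (a sketch restating the
 * sentinel convention from the inline comments): when fewer than indices.cols
 * neighbors are found for a row, the entry after the last valid one is
 * size_t(-1) in 'indices' and infinity in 'dists', so a caller can scan:
 *
 *   for (size_t j = 0; j < indices.cols && indices[i][j] != size_t(-1); ++j)
 *       consume(indices[i][j], dists[i][j]); // consume(): hypothetical
 */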
/**
*
* @param queries
* @param indices
* @param dists
* @param radius
* @param params
* @return
*/
int radiusSearch(const Matrix<ElementType>& queries,
Matrix<int>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
rtflann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);
int result = radiusSearch(queries, indices_, dists, radius, params);
for (size_t i=0;i<indices.rows;++i) {
for (size_t j=0;j<indices.cols;++j) {
indices[i][j] = indices_[i][j];
}
}
delete[] indices_.ptr();
return result;
}
/**
* @brief Perform radius search
* @param[in] queries The query points
* @param[out] indices The indices of the neighbors found within the given radius
* @param[out] dists The distances to the nearest neighbors found
* @param[in] radius The radius used for search
* @param[in] params Search parameters
* @return Number of neighbors found
*/
virtual int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
// just count neighbors
if (params.max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
count += resultSet.size();
}
}
}
else {
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
if (params.max_neighbors<0) {
// search for all neighbors
#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
else {
// number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if ((int)n>params.max_neighbors) n = params.max_neighbors;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
}
return count;
}
/**
*
* @param queries
* @param indices
* @param dists
* @param radius
* @param params
* @return
*/
int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
std::vector<std::vector<size_t> > indices_;
int result = radiusSearch(queries, indices_, dists, radius, params);
indices.resize(indices_.size());
for (size_t i=0;i<indices_.size();++i) {
indices[i].assign(indices_[i].begin(), indices_[i].end());
}
return result;
}
virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0;
protected:
virtual void freeIndex() = 0;
virtual void buildIndexImpl() = 0;
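/**
 * Translate a point ID into its current position in points_. IDs are only
 * materialized once removePoint() has been called; until then an ID is its
 * own index. After removals, ids_ remains sorted (cleanRemovedPoints()
 * compacts it in order and extendDataset() appends increasing IDs), so a
 * binary search recovers the position; size_t(-1) means the ID was removed.
 */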
size_t id_to_index(size_t id)
{
if (ids_.size()==0) {
return id;
}
size_t point_index = size_t(-1);
if (id < ids_.size() && ids_[id]==id) {
return id;
}
else {
// binary search
size_t start = 0;
size_t end = ids_.size();
while (start<end) {
size_t mid = (start+end)/2;
if (ids_[mid]==id) {
point_index = mid;
break;
}
else if (ids_[mid]<id) {
start = mid + 1;
}
else {
end = mid;
}
}
}
return point_index;
}
void indices_to_ids(const size_t* in, size_t* out, size_t size) const
{
if (removed_) {
for (size_t i=0;i<size;++i) {
out[i] = ids_[in[i]];
}
}
}
void setDataset(const Matrix<ElementType>& dataset)
{
size_ = dataset.rows;
veclen_ = dataset.cols;
last_id_ = 0;
ids_.clear();
removed_points_.clear();
removed_ = false;
removed_count_ = 0;
points_.resize(size_);
for (size_t i=0;i<size_;++i) {
points_[i] = dataset[i];
}
}
void extendDataset(const Matrix<ElementType>& new_points)
{
size_t new_size = size_ + new_points.rows;
if (removed_) {
removed_points_.resize(new_size);
ids_.resize(new_size);
}
points_.resize(new_size);
for (size_t i=size_;i<new_size;++i) {
points_[i] = new_points[i-size_];
if (removed_) {
ids_[i] = last_id_++;
removed_points_.reset(i);
}
}
size_ = new_size;
}
void cleanRemovedPoints()
{
if (!removed_) return;
size_t last_idx = 0;
for (size_t i=0;i<size_;++i) {
if (!removed_points_.test(i)) {
points_[last_idx] = points_[i];
ids_[last_idx] = ids_[i];
removed_points_.reset(last_idx);
++last_idx;
}
}
points_.resize(last_idx);
ids_.resize(last_idx);
removed_points_.resize(last_idx);
size_ = last_idx;
removed_count_ = 0;
}
void swap(NNIndex& other)
{
std::swap(distance_, other.distance_);
std::swap(last_id_, other.last_id_);
std::swap(size_, other.size_);
std::swap(size_at_build_, other.size_at_build_);
std::swap(veclen_, other.veclen_);
std::swap(index_params_, other.index_params_);
std::swap(removed_, other.removed_);
std::swap(removed_points_, other.removed_points_);
std::swap(removed_count_, other.removed_count_);
std::swap(ids_, other.ids_);
std::swap(points_, other.points_);
std::swap(data_ptr_, other.data_ptr_);
}
protected:
/**
* The distance functor
*/
Distance distance_;
/**
* Each index point has an associated ID. IDs are assigned sequentially in
* increasing order; this holds the next ID to assign (one past the ID of the
* last point added to the index).
*/
size_t last_id_;
/**
* Number of points in the index (and database)
*/
size_t size_;
/**
* Number of features in the dataset when the index was last built.
*/
size_t size_at_build_;
/**
* Size of one point in the index (and database)
*/
size_t veclen_;
/**
* Parameters of the index.
*/
IndexParams index_params_;
/**
* Flag indicating whether at least one point was removed from the index
*/
bool removed_;
/**
* Array used to mark points removed from the index
*/
DynamicBitset removed_points_;
/**
* Number of points removed from the index
*/
size_t removed_count_;
/**
* Array of point IDs, returned by nearest-neighbour operations
*/
std::vector<size_t> ids_;
/**
* Point data
*/
std::vector<ElementType*> points_;
/**
* Pointer to dataset memory if allocated by this index, otherwise NULL
*/
ElementType* data_ptr_;
//PooledAllocator pool_;
int cached; // 1 if this index is cached and must not be rebuilt, 0 otherwise
};
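// Derived index templates re-import these base-class members because names in
// a dependent base class (NNIndex<Distance>) are not visible to unqualified
// lookup under C++ two-phase name lookup; the macro below centralizes the
// 'using' boilerplate.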
#define USING_BASECLASS_SYMBOLS \
using NNIndex<Distance>::distance_;\
using NNIndex<Distance>::size_;\
using NNIndex<Distance>::size_at_build_;\
using NNIndex<Distance>::veclen_;\
using NNIndex<Distance>::index_params_;\
using NNIndex<Distance>::removed_points_;\
using NNIndex<Distance>::ids_;\
using NNIndex<Distance>::removed_;\
using NNIndex<Distance>::points_;\
using NNIndex<Distance>::extendDataset;\
using NNIndex<Distance>::setDataset;\
using NNIndex<Distance>::cleanRemovedPoints;\
using NNIndex<Distance>::indices_to_ids;
}
#endif //FLANN_NNINDEX_H
|
GB_unop__identity_uint64_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_uint32)
// op(A') function: GB (_unop_tran__identity_uint64_uint32)
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint64_uint32)
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
uint64_t z = (uint64_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
uint64_t z = (uint64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
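//------------------------------------------------------------------------------
// Illustrative note (not part of the generated kernel): at the user level this
// kernel is reached through GrB_apply with the built-in identity operator,
// for example:
//
//      GrB_Matrix C ; // uint64 output
//      GrB_Matrix A ; // uint32 input
//      GrB_Matrix_new (&C, GrB_UINT64, nrows, ncols) ;
//      GrB_Matrix_apply (C, NULL, NULL, GrB_IDENTITY_UINT64, A, NULL) ;
//
// GraphBLAS typecasts uint32_t to uint64_t exactly as GB_CAST does above.
//------------------------------------------------------------------------------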
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint64_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2048
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;
size_t
*extent;
ssize_t
offset;
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
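/*
  Usage sketch (illustrative only): an acquired DrawInfo is released with
  DestroyDrawInfo() after use, e.g.

    DrawInfo *draw_info = AcquireDrawInfo();
    (void) CloneString(&draw_info->primitive,"line 10,10 90,90");
    (void) DrawImage(image,draw_info,exception);
    draw_info=DestroyDrawInfo(draw_info);
*/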
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created and initialized to
% default values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
ExceptionInfo
*exception;
clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
exception=AcquireExceptionInfo();
if (draw_info->id != (char *) NULL)
(void) CloneString(&clone_info->id,draw_info->id);
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->compliance=draw_info->compliance;
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
exception);
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
sizeof(*clone_info->dash_pattern));
(void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
(x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed");
(void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
(size_t) number_stops*sizeof(*clone_info->gradient.stops));
}
clone_info->bounds=draw_info->bounds;
clone_info->fill_alpha=draw_info->fill_alpha;
clone_info->stroke_alpha=draw_info->stroke_alpha;
clone_info->element_reference=draw_info->element_reference;
clone_info->clip_path=draw_info->clip_path;
clone_info->clip_units=draw_info->clip_units;
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
MagickTrue,exception);
if (draw_info->composite_mask != (Image *) NULL)
clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
MagickTrue,exception);
clone_info->render=draw_info->render;
clone_info->debug=IsEventLogging();
exception=DestroyExceptionInfo(exception);
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o path_info: Specifies a pointer to a PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
if (((p)-(q)) < 0.0) \
return(-1); \
if (((p)-(q)) > 0.0) \
return(1); \
}
register const PointInfo
*p,
*q;
/*
Edge sorting for right-handed coordinate system.
*/
p=((const EdgeInfo *) p_edge)->points;
q=((const EdgeInfo *) q_edge)->points;
DrawCompareEdge(p[0].y,q[0].y);
DrawCompareEdge(p[0].x,q[0].x);
DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
(q[1].x-q[0].x));
DrawCompareEdge(p[1].y,q[1].y);
DrawCompareEdge(p[1].x,q[1].x);
return(0);
}
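/*
  The comparisons above order edges by the y, then x, of their first point,
  then by the initial slope (the cross product of the leading segments), and
  finally by their second point. The result is the top-to-bottom,
  left-to-right edge order the scan-line polygon fill consumes.
*/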
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
PointInfo
point;
register ssize_t
i;
for (i=0; i < (ssize_t) (number_points >> 1); i++)
{
point=points[i];
points[i]=points[number_points-(i+1)];
points[number_points-(i+1)]=point;
}
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
register const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
MagickBooleanType
closed_subpath;
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,
q;
register ssize_t
i,
n;
ssize_t
coordinates,
start;
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case AlphaPrimitive:
case ColorPrimitive:
case ImagePrimitive:
case PointPrimitive:
case TextPrimitive:
return((PathInfo *) NULL);
default:
break;
}
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
return((PathInfo *) NULL);
coordinates=0;
closed_subpath=MagickFalse;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
/*
New subpath.
*/
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
closed_subpath=primitive_info[i].closed_subpath;
}
coordinates--;
if ((code == MoveToCode) || (coordinates <= 0) ||
(fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
{
/*
Eliminate duplicate points.
*/
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue; /* next point in current subpath */
if (closed_subpath != MagickFalse)
{
closed_subpath=MagickFalse;
continue;
}
/*
Mark the start point of the subpath as open if the subpath is not closed.
*/
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
sizeof(*path_info));
return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->id != (char *) NULL)
draw_info->id=DestroyString(draw_info->id);
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
if (draw_info->composite_mask != (Image *) NULL)
draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
const size_t edge)
{
assert(edge < polygon_info->number_edges);
polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
polygon_info->edges[edge].points);
polygon_info->number_edges--;
if (edge < polygon_info->number_edges)
(void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
(size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
register ssize_t
i;
if (polygon_info->edges != (EdgeInfo *) NULL)
{
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
polygon_info->edges[i].points=(PointInfo *)
RelinquishMagickMemory(polygon_info->edges[i].points);
polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
polygon_info->edges);
}
return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
register double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= MagickEpsilon)
{
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -MagickEpsilon)
{
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine the x-range permitted by the top and bottom image edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= MagickEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -MagickEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
double
determinant;
determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
affine->ry);
inverse_affine.sx=determinant*affine->sy;
inverse_affine.rx=determinant*(-affine->rx);
inverse_affine.ry=determinant*(-affine->ry);
inverse_affine.sy=determinant*affine->sx;
inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
inverse_affine.ry;
inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
inverse_affine.sy;
return(inverse_affine);
}
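/*
  The forward map is x' = sx*x + ry*y + tx and y' = rx*x + sy*y + ty. The
  code above inverts the 2x2 linear part through its adjugate scaled by
  1/(sx*sy-rx*ry) (computed safely with PerceptibleReciprocal), then picks
  the translation that sends (tx,ty) back to the origin:
  inverse.tx = -(tx*inverse.sx+ty*inverse.ry) and
  inverse.ty = -(tx*inverse.rx+ty*inverse.sy).
*/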
MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
extent[4],
min,
max;
register ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickCoreSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
PointInfo
point;
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetPixelInfo(image,&zero);
start=(ssize_t) ceil(edge.y1-0.5);
stop=(ssize_t) floor(edge.y2+0.5);
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,image,stop-start,1)
#endif
for (y=start; y <= stop; y++)
{
PixelInfo
composite,
pixel;
PointInfo
point;
register ssize_t
x;
register Quantum
*magick_restrict q;
SegmentInfo
inverse_edge;
ssize_t
x_offset;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
1,exception);
if (q == (Quantum *) NULL)
continue;
pixel=zero;
composite=zero;
x_offset=0;
for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
point.x,point.y,&pixel,exception);
if (status == MagickFalse)
break;
GetPixelInfoPixel(image,q,&composite);
CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
&composite);
SetPixelViaPixelInfo(image,&composite,q);
x_offset++;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
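/*
  Usage sketch (illustrative only): composite 'source' over 'image' rotated
  by 30 degrees and translated to (50,50):

    AffineMatrix affine;
    GetAffineMatrix(&affine);
    affine.sx=cos(DegreesToRadians(30.0));
    affine.rx=sin(DegreesToRadians(30.0));
    affine.ry=(-sin(DegreesToRadians(30.0)));
    affine.sy=cos(DegreesToRadians(30.0));
    affine.tx=50.0;
    affine.ty=50.0;
    (void) DrawAffineImage(image,source,&affine,exception);
*/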
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
const DrawInfo *draw_info)
{
return(MagickMin((double) draw_info->stroke_width,
(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows)));
}
static MagickBooleanType DrawBoundingRectangles(Image *image,
const DrawInfo *draw_info,const PolygonInfo *polygon_info,
ExceptionInfo *exception)
{
double
mid;
DrawInfo
*clone_info;
MagickStatusType
status;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
(void) memset(primitive_info,0,sizeof(primitive_info));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
resolution.x=96.0;
resolution.y=96.0;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
SaneStrokeWidth(image,clone_info)/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
exception);
else
status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
break;
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
if (status == MagickFalse)
break;
}
if (i < (ssize_t) polygon_info->number_edges)
{
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
}
status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info,exception);
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
const char
*clip_path;
Image
*clipping_mask;
MagickBooleanType
status;
clip_path=GetImageArtifact(image,id);
if (clip_path == (const char *) NULL)
return(MagickFalse);
clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
exception);
if (clipping_mask == (Image *) NULL)
return(MagickFalse);
status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
clipping_mask=DestroyImage(clipping_mask);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *clip_path,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
Image
*clip_mask,
*separate_mask;
MagickStatusType
status;
/*
Draw a clip path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(clip_mask));
status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
status=QueryColorCompliance("#0000",AllCompliance,
&clip_mask->background_color,exception);
clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
clip_mask->background_color.alpha_trait=BlendPixelTrait;
status=SetImageBackgroundColor(clip_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,clip_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
if (clone_info->clip_mask != (char *) NULL)
clone_info->clip_mask=DestroyString(clone_info->clip_mask);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
clone_info->clip_path=MagickTrue;
status=RenderMVGContent(clip_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
clip_mask=DestroyImage(clip_mask);
clip_mask=separate_mask;
status=NegateImage(clip_mask,MagickFalse,exception);
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
}
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *mask_path,ExceptionInfo *exception)
{
Image
*composite_mask,
*separate_mask;
DrawInfo
*clone_info;
MagickStatusType
status;
/*
Draw a mask path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(composite_mask));
status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
exception);
status=QueryColorCompliance("#0000",AllCompliance,
&composite_mask->background_color,exception);
composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
composite_mask->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(composite_mask,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,mask_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->alpha=OpaqueAlpha;
status=RenderMVGContent(composite_mask,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
if (separate_mask != (Image *) NULL)
{
composite_mask=DestroyImage(composite_mask);
composite_mask=separate_mask;
status=NegateImage(composite_mask,MagickFalse,exception);
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
}
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
double
length,
maximum_length,
offset,
scale,
total_length;
DrawInfo
*clone_info;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
register double
dx,
dy;
register ssize_t
i;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+32UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
return(MagickFalse);
(void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
sizeof(*dash_polygon));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*draw_info->dash_pattern[0];
offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
scale*draw_info->dash_offset : 0.0;
j=1;
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*draw_info->dash_pattern[n];
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > MaxBezierCoordinates)
break;
if (fabs(length) < MagickEpsilon)
{
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
{
total_length+=length;
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
j=1;
}
else
{
if ((j+1) > (ssize_t) number_vertices)
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
if (status == MagickFalse)
break;
}
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((status != MagickFalse) && (total_length < maximum_length) &&
((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=MagickEpsilon;
dash_polygon[j].point.y+=MagickEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
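/*
  Usage sketch (illustrative only): a dash pattern is an array of alternating
  on/off lengths terminated by 0.0, installed on the DrawInfo before the
  stroke is drawn, e.g. 5 pixels on, 3 pixels off:

    draw_info->dash_pattern=(double *) AcquireQuantumMemory(3,
      sizeof(*draw_info->dash_pattern));
    draw_info->dash_pattern[0]=5.0;
    draw_info->dash_pattern[1]=3.0;
    draw_info->dash_pattern[2]=0.0;
*/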
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
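/*
Rotate the point into the gradient frame and normalize by the radii; the
result is the elliptical radius, which reaches 1.0 at the gradient
boundary.
*/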
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
StopInfo
*stop_1,
*stop_2;
stop_1=(StopInfo *) x;
stop_2=(StopInfo *) y;
if (stop_1->offset > stop_2->offset)
return(1);
if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
return(0);
return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
const DrawInfo *draw_info,ExceptionInfo *exception)
{
CacheView
*image_view;
const GradientInfo
*gradient;
const SegmentInfo
*gradient_vector;
double
length;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
point;
RectangleInfo
bounding_box;
ssize_t
y;
/*
Draw a linear or radial gradient on the image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
gradient=(&draw_info->gradient);
qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
StopInfoCompare);
gradient_vector=(&gradient->gradient_vector);
point.x=gradient_vector->x2-gradient_vector->x1;
point.y=gradient_vector->y2-gradient_vector->y1;
length=sqrt(point.x*point.x+point.y*point.y);
bounding_box=gradient->bounding_box;
status=MagickTrue;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
{
double
alpha,
offset;
PixelInfo
composite,
pixel;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
j;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
composite=zero;
offset=GetStopColorOffset(gradient,0,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
{
GetPixelInfoPixel(image,q,&pixel);
switch (gradient->spread)
{
case UndefinedSpread:
case PadSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if ((offset < 0.0) || (i == 0))
composite=gradient->stops[0].color;
else
if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case ReflectSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
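/*
Fold the offset so that successive gradient periods alternate direction.
*/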
if (offset < 0.0)
offset=(-offset);
if ((ssize_t) fmod(offset,2.0) == 0)
offset=fmod(offset,1.0);
else
offset=1.0-fmod(offset,1.0);
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case RepeatSpread:
{
double
repeat;
MagickBooleanType
antialias;
antialias=MagickFalse;
repeat=0.0;
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
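/*
Wrap the raw offset into a single gradient period and flag the seam pixel
so the transition can be antialiased.
*/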
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type == LinearGradient)
{
repeat=fmod(offset,length);
if (repeat < 0.0)
repeat=length-fmod(-repeat,length);
antialias=(repeat < length) && ((repeat+1.0) > length) ?
MagickTrue : MagickFalse;
offset=PerceptibleReciprocal(length)*repeat;
}
else
{
repeat=fmod(offset,gradient->radius);
if (repeat < 0.0)
repeat=gradient->radius-fmod(-repeat,gradient->radius);
antialias=repeat+1.0 > gradient->radius ? MagickTrue :
MagickFalse;
offset=repeat/gradient->radius;
}
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
if (antialias != MagickFalse)
{
if (gradient->type == LinearGradient)
alpha=length-repeat;
else
alpha=gradient->radius-repeat;
i=0;
j=(ssize_t) gradient->number_stops-1L;
}
CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
}
CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive may
% be supplied as a string or as a filename; precede a filename with an
% "at" sign (@) to draw the contents of the file on the image. You can
% affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
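% A minimal usage sketch (illustrative; assumes a valid image and
% exception):
%
%   DrawInfo *draw_info = CloneDrawInfo((ImageInfo *) NULL,(DrawInfo *) NULL);
%   (void) CloneString(&draw_info->primitive,"fill red circle 60,60 60,90");
%   (void) DrawImage(image,draw_info,exception);
%   draw_info=DestroyDrawInfo(draw_info);
%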
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
const size_t pad)
{
double
extent;
size_t
quantum;
/*
Check that there is enough storage for drawing primitives.
*/
extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
quantum=sizeof(**mvg_info->primitive_info);
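/*
Perform the limit checks below in double precision so the arithmetic
cannot overflow before the cast to size_t.
*/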
if (((extent*quantum) < (double) SSIZE_MAX) &&
((extent*quantum) < (double) GetMaxMemoryRequest()))
{
if (extent <= (double) *mvg_info->extent)
return(MagickTrue);
*mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
*mvg_info->primitive_info,(size_t) extent,quantum);
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
{
register ssize_t
i;
*mvg_info->extent=(size_t) extent;
for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
(*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
return(MagickTrue);
}
}
/*
Reallocation failed, allocate a primitive to facilitate unwinding.
*/
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
*mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
*mvg_info->primitive_info);
*mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
PrimitiveExtentPad*quantum);
(void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
*mvg_info->extent=1;
return(MagickFalse);
}
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
const char
*p,
*q;
p=(const char *) target;
q=(const char *) source;
return(strcmp(p,q));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
char
*macro,
*token;
const char
*q;
size_t
extent;
SplayTreeInfo
*macros;
/*
Scan graphic primitives for definitions and classes.
*/
if (primitive == (const char *) NULL)
return((SplayTreeInfo *) NULL);
macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
RelinquishMagickMemory);
macro=AcquireString(primitive);
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
for (q=primitive; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare("push",token) == 0)
{
register const char
*end,
*start;
(void) GetNextToken(q,&q,extent,token);
if (*q == '"')
{
char
name[MagickPathExtent];
const char
*p;
ssize_t
n;
/*
Named macro (e.g. push graphic-context "wheel").
*/
(void) GetNextToken(q,&q,extent,token);
start=q;
end=q;
(void) CopyMagickString(name,token,MagickPathExtent);
n=1;
for (p=q; *p != '\0'; )
{
if (GetNextToken(p,&p,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare(token,"pop") == 0)
{
end=p-strlen(token)-1;
n--;
}
if (LocaleCompare(token,"push") == 0)
n++;
if ((n == 0) && (end > start))
{
/*
Extract macro.
*/
(void) GetNextToken(p,&p,extent,token);
(void) CopyMagickString(macro,start,(size_t) (end-start));
(void) AddValueToSplayTree(macros,ConstantString(name),
ConstantString(macro));
break;
}
}
}
}
}
token=DestroyString(token);
macro=DestroyString(macro);
return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
char
*p;
double
value;
value=StringToDouble(point,&p);
return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse :
MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
const PointInfo point)
{
primitive_info->coordinates=1;
primitive_info->closed_subpath=MagickFalse;
primitive_info->point=point;
return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
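/*
Count the dash lengths first, then parse them; an odd-length pattern is
duplicated so the on/off phases alternate correctly, and the list is
terminated with 0.0.
*/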
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
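/*
Concatenate the parsed transform with the current matrix: the new affine
is current*affine, applied to both the linear part and the translation.
*/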
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Compute the primitive's bounding box; arcs, circles, and ellipses are
circumscribed from it below.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
return(RenderMVGContent(image,draw_info,0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() renders the MVG path registered under the given pattern
% name into a pattern image.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
%    o pattern: the pattern image, rendered and returned in this pointer.
%
% o exception: return any errors or warnings in this structure.
%
*/
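/*
  Usage sketch (illustrative, not part of this file): the path and tile
  geometry are looked up as image artifacts named "<name>" and
  "<name>-geometry", so a caller registers both before invoking
  DrawPatternPath().  The artifact values below are hypothetical.

    Image
      *pattern = (Image *) NULL;

    (void) SetImageArtifact(image,"hatch","M 0,0 L 8,8");   // MVG path
    (void) SetImageArtifact(image,"hatch-geometry","8x8");  // tile size
    if (DrawPatternPath(image,draw_info,"hatch",&pattern,exception) != MagickFalse)
      pattern=DestroyImage(pattern);  // or keep it as a fill pattern
*/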
MagickExport MagickBooleanType DrawPatternPath(Image *image,
const DrawInfo *draw_info,const char *name,Image **pattern,
ExceptionInfo *exception)
{
char
property[MagickPathExtent];
const char
*geometry,
*path,
*type;
DrawInfo
*clone_info;
ImageInfo
*image_info;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
assert(name != (const char *) NULL);
(void) FormatLocaleString(property,MagickPathExtent,"%s",name);
path=GetImageArtifact(image,property);
if (path == (const char *) NULL)
return(MagickFalse);
(void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
geometry=GetImageArtifact(image,property);
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((*pattern) != (Image *) NULL)
*pattern=DestroyImage(*pattern);
image_info=AcquireImageInfo();
image_info->size=AcquireString(geometry);
*pattern=AcquireImage(image_info,exception);
image_info=DestroyImageInfo(image_info);
(void) QueryColorCompliance("#00000000",AllCompliance,
&(*pattern)->background_color,exception);
(void) SetImageBackgroundColor(*pattern,exception);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"begin pattern-path %s %s",name,geometry);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill_pattern=NewImageList();
clone_info->stroke_pattern=NewImageList();
(void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
type=GetImageArtifact(image,property);
if (type != (const char *) NULL)
clone_info->gradient.type=(GradientType) ParseCommandOption(
MagickGradientOptions,MagickFalse,type);
(void) CloneString(&clone_info->primitive,path);
status=RenderMVGContent(*pattern,clone_info,0,exception);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
register ssize_t
i;
assert(polygon_info != (PolygonInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (polygon_info[i] != (PolygonInfo *) NULL)
polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
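/*
  Threading note: each worker thread receives its own PolygonInfo because
  rasterization mutates per-edge state (the scanline and highwater members
  updated in GetFillAlpha(), and edges destroyed as scanlines pass them);
  sharing one copy across OpenMP threads would race.
*/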
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
const ssize_t y,double *stroke_alpha)
{
double
alpha,
beta,
distance,
subpath_alpha;
PointInfo
delta;
register const PointInfo
*q;
register EdgeInfo
*p;
register ssize_t
i;
ssize_t
j,
winding_number;
/*
Compute fill & stroke opacity for this (x,y) point.
*/
*stroke_alpha=0.0;
subpath_alpha=0.0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= (p->bounds.y1-mid-0.5))
break;
if ((double) y > (p->bounds.y2+mid+0.5))
{
(void) DestroyEdge(polygon_info,(size_t) j);
continue;
}
if (((double) x <= (p->bounds.x1-mid-0.5)) ||
((double) x > (p->bounds.x2+mid+0.5)))
continue;
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
{
if ((double) y <= (p->points[i-1].y-mid-0.5))
break;
if ((double) y > (p->points[i].y+mid+0.5))
continue;
if (p->scanline != (double) y)
{
p->scanline=(double) y;
p->highwater=(size_t) i;
}
/*
Compute distance between a point and an edge.
*/
q=p->points+i-1;
delta.x=(q+1)->x-q->x;
delta.y=(q+1)->y-q->y;
beta=delta.x*(x-q->x)+delta.y*(y-q->y);
if (beta <= 0.0)
{
delta.x=(double) x-q->x;
delta.y=(double) y-q->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=delta.x*delta.x+delta.y*delta.y;
if (beta >= alpha)
{
delta.x=(double) x-(q+1)->x;
delta.y=(double) y-(q+1)->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=PerceptibleReciprocal(alpha);
beta=delta.x*(y-q->y)-delta.y*(x-q->x);
distance=alpha*beta*beta;
}
}
/*
Compute stroke & subpath opacity.
*/
beta=0.0;
if (p->ghostline == MagickFalse)
{
alpha=mid+0.5;
if ((*stroke_alpha < 1.0) &&
(distance <= ((alpha+0.25)*(alpha+0.25))))
{
alpha=mid-0.5;
if (distance <= ((alpha+0.25)*(alpha+0.25)))
*stroke_alpha=1.0;
else
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt((double) distance);
alpha=beta-mid-0.5;
if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
*stroke_alpha=(alpha-0.25)*(alpha-0.25);
}
}
}
if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
continue;
if (distance <= 0.0)
{
subpath_alpha=1.0;
continue;
}
if (fabs(beta) < MagickEpsilon)
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt(distance);
}
alpha=beta-1.0;
if (subpath_alpha < (alpha*alpha))
subpath_alpha=alpha*alpha;
}
}
/*
Compute fill opacity.
*/
if (fill == MagickFalse)
return(0.0);
if (subpath_alpha >= 1.0)
return(1.0);
/*
Determine winding number.
*/
winding_number=0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= p->bounds.y1)
break;
if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
continue;
if ((double) x > p->bounds.x2)
{
winding_number+=p->direction ? 1 : -1;
continue;
}
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) (p->number_points-1); i++)
if ((double) y <= p->points[i].y)
break;
q=p->points+i-1;
if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
winding_number+=p->direction ? 1 : -1;
}
if (fill_rule != NonZeroRule)
{
if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
return(1.0);
}
else
if (MagickAbsoluteValue(winding_number) != 0)
return(1.0);
return(subpath_alpha);
}
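/*
  The fill-rule decision above reduces to a predicate on the signed winding
  number.  A minimal standalone restatement (hypothetical helper mirroring
  the code at the end of GetFillAlpha()):

    static int IsInsideFill(const ssize_t winding_number,
      const FillRule fill_rule)
    {
      if (fill_rule != NonZeroRule)
        return((MagickAbsoluteValue(winding_number) & 0x01) != 0); // even-odd
      return(MagickAbsoluteValue(winding_number) != 0);            // nonzero
    }

  For example, a point enclosed twice by same-direction subpaths has winding
  number 2: inside under the nonzero rule, outside under even-odd.
*/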
static MagickBooleanType DrawPolygonPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
fill,
status;
double
mid;
PolygonInfo
**magick_restrict polygon_info;
register EdgeInfo
*p;
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
start_y,
stop_y,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
assert(primitive_info != (PrimitiveInfo *) NULL);
if (primitive_info->coordinates <= 1)
return(MagickTrue);
/*
Compute bounding box.
*/
polygon_info=AcquirePolygonThreadSet(primitive_info);
if (polygon_info == (PolygonInfo **) NULL)
return(MagickFalse);
DisableMSCWarning(4127)
if (0)
{
status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
if (status == MagickFalse)
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(status);
}
}
RestoreMSCWarning
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
fill=(primitive_info->method == FillToBorderMethod) ||
(primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
bounds=polygon_info[0]->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
{
p=polygon_info[0]->edges+i;
if (p->bounds.x1 < bounds.x1)
bounds.x1=p->bounds.x1;
if (p->bounds.y1 < bounds.y1)
bounds.y1=p->bounds.y1;
if (p->bounds.x2 > bounds.x2)
bounds.x2=p->bounds.x2;
if (p->bounds.y2 > bounds.y2)
bounds.y2=p->bounds.y2;
}
bounds.x1-=(mid+1.0);
bounds.y1-=(mid+1.0);
bounds.x2+=(mid+1.0);
bounds.y2+=(mid+1.0);
if ((bounds.x1 >= (double) image->columns) ||
(bounds.y1 >= (double) image->rows) ||
(bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(MagickTrue); /* virtual polygon */
}
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x1;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y1;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x2;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y2;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
if ((primitive_info->coordinates == 1) ||
(polygon_info[0]->number_edges == 0))
{
/*
Draw point.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
x=start_x;
q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for ( ; x <= stop_x; x++)
{
if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
(y == (ssize_t) ceil(primitive_info->point.y-0.5)))
{
GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
}
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-polygon");
return(status);
}
/*
Draw polygon or line.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
1),1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=start_x; x <= stop_x; x++)
{
double
fill_alpha,
stroke_alpha;
PixelInfo
fill_color,
stroke_color;
/*
Fill and/or stroke.
*/
fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
x,y,&stroke_alpha);
if (draw_info->stroke_antialias == MagickFalse)
{
fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
}
GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double ConstrainCoordinate(double x)
{
if (x < (double) -(SSIZE_MAX-512))
return((double) -(SSIZE_MAX-512));
if (x > (double) (SSIZE_MAX-512))
return((double) (SSIZE_MAX-512));
return(x);
}
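/*
  Rationale: ConstrainCoordinate() keeps the ceil()/cast sequence in
  DrawPrimitive() below from overflowing ssize_t; the 512-unit margin
  absorbs the rounding.  For example (illustrative):

    x=(ssize_t) ceil(ConstrainCoordinate(1e300)-0.5);  // clamped, no overflow
*/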
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
point,
q;
register ssize_t
i,
x;
ssize_t
coordinates,
y;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
if ((fabs(q.x-point.x) >= MagickEpsilon) ||
(fabs(q.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= MagickEpsilon) ||
(fabs(p.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
status=MagickTrue;
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
(IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
status&=SetImageColorspace(image,sRGBColorspace,exception);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
exception);
status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
exception);
}
x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5));
y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5));
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
if (image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
ChannelType
channel_mask;
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
(void) SetImageChannelMask(image,channel_mask);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetPixelInfo(image,&pixel);
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
PixelInfo
pixel,
target;
status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
PixelInfo
target;
status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
break;
}
case ResetMethod:
{
PixelInfo
pixel;
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
status&=SyncCacheViewAuthenticPixels(image_view,exception);
if (status == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MagickPathExtent];
Image
*composite_image,
*composite_images;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
composite_images=(Image *) NULL;
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_images=ReadInlineImage(clone_info,primitive_info->text,
exception);
else
if (*primitive_info->text != '\0')
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
composite_images=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
composite_image=RemoveFirstImageFromList(&composite_images);
composite_images=DestroyImageList(composite_images);
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
/*
Resize image.
*/
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
status&=TransformImage(&composite_image,(char *) NULL,
composite_geometry,exception);
}
if (composite_image->alpha_trait == UndefinedPixelTrait)
status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
exception);
if (draw_info->alpha != OpaqueAlpha)
status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
if ((draw_info->compose == OverCompositeOp) ||
(draw_info->compose == SrcOverCompositeOp))
status&=DrawAffineImage(image,composite_image,&affine,exception);
else
status&=CompositeImage(image,composite_image,draw_info->compose,
MagickTrue,geometry.x,geometry.y,exception);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
PixelInfo
fill_color;
register Quantum
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&fill_color,exception);
CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
GetPixelAlpha(image,q),q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.alpha != (Quantum) TransparentAlpha))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
break;
}
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
double
x,
y;
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
closed_path=primitive_info[0].closed_subpath;
i=(ssize_t) primitive_info[0].coordinates;
x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
closed_path=MagickTrue;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
exception);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
if (status != MagickFalse)
status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
break;
}
}
image_view=DestroyCacheView(image_view);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
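/*
  Usage sketch (illustrative): DrawPrimitive() consumes a PrimitiveInfo
  array terminated by an UndefinedPrimitive entry, as built by
  RenderMVGContent() above.  Two elements suffice for a single point:

    MagickBooleanType
      status;

    PrimitiveInfo
      primitive_info[2];

    (void) memset(primitive_info,0,sizeof(primitive_info));
    primitive_info[0].primitive=PointPrimitive;
    primitive_info[0].coordinates=1;
    primitive_info[0].point.x=10.0;
    primitive_info[0].point.y=20.0;
    primitive_info[1].primitive=UndefinedPrimitive;
    status=DrawPrimitive(image,draw_info,primitive_info,exception);
*/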
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
PrimitiveInfo
linecap[5];
register ssize_t
i;
for (i=0; i < 4; i++)
linecap[i]=(*primitive_info);
linecap[0].coordinates=4;
linecap[1].point.x+=2.0*MagickEpsilon;
linecap[2].point.x+=2.0*MagickEpsilon;
linecap[2].point.y+=2.0*MagickEpsilon;
linecap[3].point.y+=2.0*MagickEpsilon;
linecap[4].primitive=UndefinedPrimitive;
return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
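/*
  Cap note: the quadrilateral above is degenerate (2*MagickEpsilon on a
  side), so it contributes no visible fill; it exists to make
  DrawPolygonPrimitive() stroke an effectively single point, which the
  distance test in GetFillAlpha() renders as a disc of radius mid -- a
  round cap at the subpath endpoint.
*/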
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
register const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,exception);
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
if (p->coordinates == 1)
continue;
stroke_polygon=TraceStrokePolygon(image,draw_info,p);
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
status=0;
break;
}
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
if (status == 0)
break;
q=p+p->coordinates-1;
closed_path=p->closed_subpath;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
status&=DrawRoundLinecap(image,draw_info,p,exception);
status&=DrawRoundLinecap(image,draw_info,q,exception);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) memset(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
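/*
  Usage sketch (illustrative): start from the identity and overwrite the
  coefficients.  Given the transform applied in RenderMVGContent()
  (x' = sx*x+ry*y+tx, y' = rx*x+sy*y+ty), a rotation by 30 degrees is:

    AffineMatrix
      affine;

    GetAffineMatrix(&affine);
    affine.sx=cos(DegreesToRadians(30.0));
    affine.rx=sin(DegreesToRadians(30.0));
    affine.ry=(-sin(DegreesToRadians(30.0)));
    affine.sy=cos(DegreesToRadians(30.0));
*/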
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
char
*next_token;
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) memset(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
(void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
exception);
(void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
exception);
draw_info->stroke_antialias=clone_info->antialias;
draw_info->stroke_width=1.0;
draw_info->fill_rule=EvenOddRule;
draw_info->alpha=OpaqueAlpha;
draw_info->fill_alpha=OpaqueAlpha;
draw_info->stroke_alpha=OpaqueAlpha;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
draw_info->pointsize=12.0;
draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
draw_info->compose=OverCompositeOp;
draw_info->render=MagickTrue;
draw_info->clip_path=MagickFalse;
draw_info->debug=IsEventLogging();
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
if (fabs(clone_info->pointsize) >= MagickEpsilon)
draw_info->pointsize=clone_info->pointsize;
draw_info->border_color=clone_info->border_color;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
else
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"family");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->family,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"style");
if (option != (const char *) NULL)
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
exception);
option=GetImageOption(clone_info,"weight");
if (option != (const char *) NULL)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(option);
draw_info->weight=(size_t) weight;
}
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickCoreSignature;
clone_info=DestroyImageInfo(clone_info);
}
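/*
  Usage sketch (illustrative): GetDrawInfo() initializes a caller-supplied
  structure in place; it does not allocate.  Heap-based callers normally
  reach it through AcquireDrawInfo() or CloneDrawInfo() instead:

    DrawInfo
      draw_info;

    GetDrawInfo(image_info,&draw_info);
    draw_info.stroke_width=2.0;  // override a default before drawing
*/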
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the binomial coefficient C(n,k), i.e. the number of
% ways to choose k of n items.
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
%    o n: the number of items to choose from.
%
%    o k: the number of items chosen.
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
double
r;
register ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
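/*
  Worked example: Permutate() computes n!/(k!(n-k)!).  TraceBezier() below
  uses it for the Bernstein basis weights; e.g. Permutate(3,k) for k=0..3
  yields 1,3,3,1, the coefficients of a cubic Bézier:

    assert(fabs(Permutate(4,2)-6.0) < MagickEpsilon);  // C(4,2) == 6
*/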
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo degrees)
{
PointInfo
center,
radius;
center.x=0.5*(end.x+start.x);
center.y=0.5*(end.y+start.y);
radius.x=fabs(center.x-start.x);
radius.y=fabs(center.y-start.y);
return(TraceEllipse(mvg_info,center,radius,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
MagickStatusType
status;
PointInfo
center,
points[3],
radii;
register double
cosine,
sine;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
size_t
arc_segments;
ssize_t
offset;
offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
return(TracePoint(primitive_info,end));
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
return(TraceLine(primitive_info,start,end));
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
if (delta < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
MagickEpsilon))));
status=MagickTrue;
p=primitive_info;
for (i=0; i < (ssize_t) arc_segments; i++)
{
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
status&=TraceBezier(mvg_info,4);
if (status == 0)
break;
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
p+=p->coordinates;
}
if (status == 0)
return(MagickFalse);
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
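/*
  Derivation note for the gamma term above: with segment angle
  delta = theta/arc_segments, the loop sets beta = delta/2, so

    gamma = (8/3)*sin(beta/2)^2/sin(beta)
          = (8/3)*sin(delta/4)^2/(2*sin(delta/4)*cos(delta/4))
          = (4/3)*tan(delta/4),

  the classic control-point distance for approximating a circular arc of
  angle delta with a single cubic Bézier; capping each segment at pi/2
  keeps the radial error negligible.
*/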
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=number_coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
coefficients=(double *) AcquireQuantumMemory(number_coordinates,
sizeof(*coefficients));
points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
{
if (points != (PointInfo *) NULL)
points=(PointInfo *) RelinquishMagickMemory(points);
if (coefficients != (double *) NULL)
coefficients=(double *) RelinquishMagickMemory(coefficients);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
control_points=quantum*number_coordinates;
if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
/*
Compute Bézier points.
*/
end=primitive_info[number_coordinates-1].point;
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
weight=0.0;
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bézier curves are just short segmented polys.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
if (TracePoint(p,points[i]) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
}
if (TracePoint(p,end) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickTrue);
}
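/*
  Evaluation note: the loop above computes the Bernstein form

    B(t) = sum_{j=0}^{n-1} C(n-1,j) * (1-t)^(n-1-j) * t^j * P_j

  incrementally, with t = weight and n = number_coordinates: alpha starts
  at (1-t)^(n-1) and is multiplied by t/(1-t) each iteration, so
  alpha*coefficients[j] is exactly the j-th Bernstein weight without a
  pow() call per control point.
*/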
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end)
{
double
alpha,
beta,
radius;
PointInfo
offset,
degrees;
alpha=end.x-start.x;
beta=end.y-start.y;
radius=hypot((double) alpha,(double) beta);
offset.x=(double) radius;
offset.y=(double) radius;
degrees.x=0.0;
degrees.y=360.0;
return(TraceEllipse(mvg_info,start,offset,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
const PointInfo radii,const PointInfo arc)
{
double
coordinates,
delta,
step,
x,
y;
PointInfo
angle,
point;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(MagickTrue);
delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
angle.x=DegreesToRadians(arc.x);
y=arc.y;
while (y < arc.x)
y+=360.0;
angle.y=DegreesToRadians(y);
coordinates=ceil((angle.y-angle.x)/step+1.0);
if (coordinates > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
return(MagickFalse);
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
x=fabs(primitive_info[0].point.x-
primitive_info[primitive_info->coordinates-1].point.x);
y=fabs(primitive_info[0].point.y-
primitive_info[primitive_info->coordinates-1].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
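/*
  Sampling note: delta above is 2/max(radii), so for large ellipses the
  angular step reduces to delta/2 = 1/max(radii), i.e. roughly one pixel
  of arc length per vertex; small ellipses fall back to the coarser
  default of MagickPI/8 radians per vertex.
*/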
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
if (TracePoint(primitive_info,start) == MagickFalse)
return(MagickFalse);
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
{
primitive_info->primitive=PointPrimitive;
primitive_info->coordinates=1;
return(MagickTrue);
}
if (TracePoint(primitive_info+1,end) == MagickFalse)
return(MagickFalse);
(primitive_info+1)->primitive=primitive_info->primitive;
primitive_info->coordinates=2;
primitive_info->closed_subpath=MagickFalse;
return(MagickTrue);
}
static size_t TracePath(MVGInfo *mvg_info,const char *path,
ExceptionInfo *exception)
{
char
*next_token,
token[MagickPathExtent];
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
MagickBooleanType
status;
PointInfo
end = {0.0, 0.0},
points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
point = {0.0, 0.0},
start = {0.0, 0.0};
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register PrimitiveInfo
*q;
register ssize_t
i;
size_t
number_coordinates,
z_count;
ssize_t
subpath_offset;
subpath_offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
status=MagickTrue;
attribute=0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
for (p=path; *p != '\0'; )
{
if (status == MagickFalse)
break;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle = 0.0;
MagickBooleanType
large_arc = MagickFalse,
sweep = MagickFalse;
PointInfo
arc = {0.0, 0.0};
/*
Elliptical arc.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
/*
Move to.
*/
if (mvg_info->offset != subpath_offset)
{
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
}
i=0;
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
if (i == 0)
start=point;
i++;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
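/*
Added exposition: a new moveto first closes out the previous subpath run by
writing its head's length (q minus the head) before recording the fresh
start point, so paths without an explicit 'Z' still get correct per-subpath
coordinate counts.
*/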
case 'q':
case 'Q':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
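/*
Added exposition: for the smooth forms 'S'/'s' (and 'T'/'t' below) the first
control point is the reflection of the previous segment's last control point
about the current point, i.e. reflected = 2*current - previous_control, which
is what the points[1].x/.y assignments above compute; when the preceding
command was not a matching Bezier, both leading controls collapse to the
current point.
*/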
case 't':
case 'T':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (status == MagickFalse)
break;
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
/*
Vertical line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
/*
Close path.
*/
point=start;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
primitive_info->closed_subpath=MagickTrue;
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
z_count++;
break;
}
default:
{
ThrowPointExpectedException(token,exception);
break;
}
}
}
if (status == MagickFalse)
return(0);
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return(number_coordinates);
}
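/*
Added exposition: the path tracer above returns the total number of
coordinates traced (0 on error), and each subpath head records its run
length in `coordinates`. A hypothetical consumer (assuming a
zero-coordinate terminator) could hop head to head:

    for (const PrimitiveInfo *s=info; s->coordinates != 0; s+=s->coordinates)
      consume_subpath(s,s->coordinates);    // consume_subpath is hypothetical
*/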
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
PointInfo
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=start.x;
point.y=end.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,end) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=end.x;
point.y=start.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
point,
segment;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
ssize_t
offset;
offset=mvg_info->offset;
segment.x=fabs(end.x-start.x);
segment.y=fabs(end.y-start.y);
if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
{
(*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
return(MagickTrue);
}
if (arc.x > (0.5*segment.x))
arc.x=0.5*segment.x;
if (arc.y > (0.5*segment.y))
arc.y=0.5*segment.y;
point.x=start.x+segment.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+segment.x-arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
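/*
Added exposition: the rounded rectangle above is assembled from four quarter
ellipses (270-360, 0-90, 90-180, and 180-270 degrees) centered inside each
corner, with the corner radii first clamped to half the rectangle's width
and height, and the subpath is closed by re-tracing the first point.
*/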
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
const size_t number_vertices,const double offset)
{
double
distance;
register double
dx,
dy;
register ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
return(MagickTrue);
}
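/*
Minimal standalone sketch of the square-linecap extension implemented above
(added for exposition; extend_endpoint_sketch is not part of the original
source): move an endpoint away from its nearest distinct neighbour by
`offset` along the segment direction.
*/
static void extend_endpoint_sketch(double *ex,double *ey,const double nx,
  const double ny,const double offset)
{
  double
    distance,
    dx,
    dy;

  dx=(*ex)-nx;
  dy=(*ey)-ny;
  distance=hypot(dx,dy);
  if (distance < MagickEpsilon)
    return;  /* degenerate segment: leave the endpoint where it is */
  *ex=nx+dx*(distance+offset)/distance;  /* scale direction by (d+offset) */
  *ey=ny+dy*(distance+offset)/distance;
}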
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
#define MaxStrokePad (6*BezierQuantum+360)
#define CheckPathExtent(pad_p,pad_q) \
{ \
if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
{ \
if (~extent_p < (pad_p)) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
{ \
extent_p+=(pad_p); \
stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
MaxStrokePad,sizeof(*stroke_p)); \
} \
} \
if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
{ \
if (~extent_q < (pad_q)) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
{ \
extent_q+=(pad_q); \
stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
MaxStrokePad,sizeof(*stroke_q)); \
} \
} \
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
{ \
if (stroke_p != (PointInfo *) NULL) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
if (stroke_q != (PointInfo *) NULL) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
polygon_primitive=(PrimitiveInfo *) \
RelinquishMagickMemory(polygon_primitive); \
return((PrimitiveInfo *) NULL); \
} \
}
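/*
Added note: the `~extent < pad` tests above are overflow guards; for the
unsigned extents, ~extent equals SIZE_MAX-extent, so the condition detects
that extent+pad would wrap before the ResizeQuantumMemory size is computed,
and the path buffers are released instead of being resized.
*/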
typedef struct _StrokeSegment
{
double
p,
q;
} StrokeSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*stroke_p,
*stroke_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
extent_p,
extent_q,
number_vertices;
ssize_t
j,
n,
p,
q;
StrokeSegment
dx = {0.0, 0.0},
dy = {0.0, 0.0},
inverse_slope = {0.0, 0.0},
slope = {0.0, 0.0},
theta = {0.0, 0.0};
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if (polygon_primitive == (PrimitiveInfo *) NULL)
return((PrimitiveInfo *) NULL);
(void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
sizeof(*polygon_primitive));
closed_path=primitive_info[0].closed_subpath;
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
{
if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
{
/*
Zero length subpath.
*/
stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
sizeof(*stroke_polygon));
stroke_polygon[0]=polygon_primitive[0];
stroke_polygon[0].coordinates=0;
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
n=(ssize_t) number_vertices-1L;
}
extent_p=2*number_vertices;
extent_q=2*number_vertices;
stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
sizeof(*stroke_p));
stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
sizeof(*stroke_q));
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
{
if (stroke_p != (PointInfo *) NULL)
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
if (stroke_q != (PointInfo *) NULL)
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
return((PrimitiveInfo *) NULL);
}
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < MagickEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.p) < MagickEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
(void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
stroke_q[p++]=box_q[0];
stroke_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < MagickEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.q) < MagickEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
CheckPathExtent(MaxStrokePad,MaxStrokePad);
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
stroke_q[q].x=box_q[1].x;
stroke_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
stroke_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
stroke_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
stroke_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
stroke_p[p++]=box_p[1];
stroke_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon != (PrimitiveInfo *) NULL)
{
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
}
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
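/*
Added note: on success the caller is expected to release the returned
stroke_polygon with RelinquishMagickMemory; a NULL return signals an
allocation failure.
*/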
|
DRB067-restrictpointer1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
restrict pointers: no aliasing
Array initialization using assignments.
C99 is needed to compile this code,
e.g. gcc -std=c99 -c DRB067-restrictpointer1-orig-no.c
*/
#include <stdlib.h>
typedef double real8;
void foo(real8 * restrict newSxx, real8 * restrict newSyy, int length)
{
int i;
#pragma omp parallel for private (i) firstprivate (length) schedule(dynamic)
for (i = 0; i <= length - 1; i += 1) {
newSxx[i] = 0.0;
newSyy[i] = 0.0;
}
}
int main()
{
int length=1000;
real8* newSxx = malloc (length * sizeof (real8));
real8* newSyy = malloc (length * sizeof (real8));
foo(newSxx, newSyy, length);
free (newSxx);
free (newSyy);
return 0;
}
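/*
Illustrative note (added; not part of the DataRaceBench kernel): `restrict`
is the promise that lets the loop in foo() be classified race-free. The two
output arrays may not overlap, so a call such as

    foo(newSxx, newSxx+1, length);   // would violate the restrict contract

has undefined behavior, and the compiler and race checkers may therefore
treat every iteration's writes as independent.
*/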
|
gemm_symm_int8.h | // chgemm is pleased to support the open source community by supporting ncnn.
//
// author: tpoisonooo (https://github.com/tpoisonooo/chgemm), who implemented symmetric int8 GEMM on aarch64.
//
// Copyright (C) 2019 tpoisonooo. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#pragma once
#include <stdint.h>  // int8_t, int32_t
#include <string.h>  // memcpy
#include <stdio.h>   // fprintf (used by the PRINT_MATRIX helpers)
#if __aarch64__
#define DECOMPOSE_K\
int ktmp = k;\
int k8 = k >> 3;\
int k8_even = (k8 % 2 == 0) ? 0 : 1;\
k -= (k8 << 3);\
int k4 = k >> 2;\
k -= (k4 << 2);\
int k2 = k >> 1;\
k -= (k2 << 1);\
int k1 = k;\
k = ktmp;
#define DECOMPOSE_N\
int ntmp = n;\
int n4 = n >> 2;\
n -= (n4 << 2);\
int n2 = n >> 1;\
n -= (n2 << 1);\
int n1 = n;\
n = ntmp;
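// Added worked example (not in the original header): for k = 23 the K
// decomposition yields k8 = 2 (16 elements), k4 = 1, k2 = 1, k1 = 1, with
// k8_even = 0 flagging an even number of 8-deep blocks (despite its name,
// the flag is 1, i.e. "peel one block first", when k8 is odd); for n = 7
// the N decomposition yields n4 = 1, n2 = 1, n1 = 1.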
#define PRINT_MATRIX 0
#if PRINT_MATRIX
static void print_int8_matrix(char* name, const int8_t *a, int m, int k, int ldx) {
fprintf(stdout, "------------- %s \n", name);
for (int i = 0; i < m; ++i) {
for (int j = 0; j < k; ++j) {
fprintf(stdout, "%d \t", a[i * ldx + j]);
}
fprintf(stdout, "\n\n");
}
}
static void print_int32_matrix(char* name, const int32_t *a, int m, int k, int ldx) {
fprintf(stdout, "------------- %s \n", name);
for (int i = 0; i < m; ++i) {
for (int j = 0; j < k; ++j) {
fprintf(stdout, "%d \t", a[i * ldx + j]);
}
fprintf(stdout, "\n\n");
}
}
static void print_fp32_vec(char* name, const float *a, int len) {
fprintf(stdout, "------------- %s \n", name);
for (int i = 0; i < len; ++i) {
fprintf(stdout, "%f \t", a[i]);
}
fprintf(stdout, "\n\n");
}
#endif
static void reorder_b(const int8_t* b, int8_t* sb, const int k, const int n, const int ldx) {
#if PRINT_MATRIX
print_int8_matrix("b", b, k, n, ldx);
int8_t *origin = sb;
#endif
int i = 0;
for (; i+3 < n; i += 4) {
const int8_t *p0 = b + i;
const int8_t *p1 = b + 1 * ldx + i;
const int8_t *p2 = b + 2 * ldx + i;
const int8_t *p3 = b + 3 * ldx + i;
const int8_t *p4 = b + 4 * ldx + i;
const int8_t *p5 = b + 5 * ldx + i;
const int8_t *p6 = b + 6 * ldx + i;
const int8_t *p7 = b + 7 * ldx + i;
int j = 0;
for (; j+7 < k; j += 8) {
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb[4] = p4[0];
sb[5] = p5[0];
sb[6] = p6[0];
sb[7] = p7[0];
sb[8] = p0[1];
sb[9] = p1[1];
sb[10] = p2[1];
sb[11] = p3[1];
sb[12] = p4[1];
sb[13] = p5[1];
sb[14] = p6[1];
sb[15] = p7[1];
sb[16] = p0[2];
sb[17] = p1[2];
sb[18] = p2[2];
sb[19] = p3[2];
sb[20] = p4[2];
sb[21] = p5[2];
sb[22] = p6[2];
sb[23] = p7[2];
sb[24] = p0[3];
sb[25] = p1[3];
sb[26] = p2[3];
sb[27] = p3[3];
sb[28] = p4[3];
sb[29] = p5[3];
sb[30] = p6[3];
sb[31] = p7[3];
sb += 32;
p0 += 8 * ldx;
p1 += 8 * ldx;
p2 += 8 * ldx;
p3 += 8 * ldx;
p4 += 8 * ldx;
p5 += 8 * ldx;
p6 += 8 * ldx;
p7 += 8 * ldx;
}
if (j+3 < k) {
j += 4;
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb[4] = p0[1];
sb[5] = p1[1];
sb[6] = p2[1];
sb[7] = p3[1];
sb[8] = p0[2];
sb[9] = p1[2];
sb[10] = p2[2];
sb[11] = p3[2];
sb[12] = p0[3];
sb[13] = p1[3];
sb[14] = p2[3];
sb[15] = p3[3];
sb += 16;
p0 += 4 * ldx;
p1 += 4 * ldx;
p2 += 4 * ldx;
p3 += 4 * ldx;
}
if (j+1 < k) {
j += 2;
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p0[1];
sb[3] = p1[1];
sb[4] = p0[2];
sb[5] = p1[2];
sb[6] = p0[3];
sb[7] = p1[3];
sb += 8;
p0 += 2 * ldx;
p1 += 2 * ldx;
}
if (j < k) {
sb[0] = p0[0];
sb[1] = p0[1];
sb[2] = p0[2];
sb[3] = p0[3];
sb += 4;
p0 += ldx;
}
}
if (i+1 < n) {
const int8_t *p0 = b + i;
const int8_t *p1 = b + 1 * ldx + i;
const int8_t *p2 = b + 2 * ldx + i;
const int8_t *p3 = b + 3 * ldx + i;
const int8_t *p4 = b + 4 * ldx + i;
const int8_t *p5 = b + 5 * ldx + i;
const int8_t *p6 = b + 6 * ldx + i;
const int8_t *p7 = b + 7 * ldx + i;
int j = 0;
for (; j+7 < k; j += 8) {
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb[4] = p4[0];
sb[5] = p5[0];
sb[6] = p6[0];
sb[7] = p7[0];
sb[8] = p0[1];
sb[9] = p1[1];
sb[10] = p2[1];
sb[11] = p3[1];
sb[12] = p4[1];
sb[13] = p5[1];
sb[14] = p6[1];
sb[15] = p7[1];
sb += 16;
p0 += 8 * ldx;
p1 += 8 * ldx;
p2 += 8 * ldx;
p3 += 8 * ldx;
p4 += 8 * ldx;
p5 += 8 * ldx;
p6 += 8 * ldx;
p7 += 8 * ldx;
}
if (j+3 < k) {
j += 4;
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb[4] = p0[1];
sb[5] = p1[1];
sb[6] = p2[1];
sb[7] = p3[1];
sb += 8;
p0 += 4 * ldx;
p1 += 4 * ldx;
p2 += 4 * ldx;
p3 += 4 * ldx;
}
if (j+1 < k) {
j += 2;
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p0[1];
sb[3] = p1[1];
sb += 4;
p0 += 2 * ldx;
p1 += 2 * ldx;
}
if (j < k) {
sb[0] = p0[0];
sb[1] = p0[1];
sb += 2;
p0 += ldx;
}
i += 2;
}
if (i < n) {
const int8_t *p0 = b + i;
const int8_t *p1 = b + 1 * ldx + i;
const int8_t *p2 = b + 2 * ldx + i;
const int8_t *p3 = b + 3 * ldx + i;
const int8_t *p4 = b + 4 * ldx + i;
const int8_t *p5 = b + 5 * ldx + i;
const int8_t *p6 = b + 6 * ldx + i;
const int8_t *p7 = b + 7 * ldx + i;
int j = 0;
for (; j+7 < k; j += 8) {
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb[4] = p4[0];
sb[5] = p5[0];
sb[6] = p6[0];
sb[7] = p7[0];
sb += 8;
p0 += 8 * ldx;
p1 += 8 * ldx;
p2 += 8 * ldx;
p3 += 8 * ldx;
p4 += 8 * ldx;
p5 += 8 * ldx;
p6 += 8 * ldx;
p7 += 8 * ldx;
}
if (j+3 < k) {
j += 4;
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb += 4;
p0 += 4 * ldx;
p1 += 4 * ldx;
p2 += 4 * ldx;
p3 += 4 * ldx;
}
if (j+1 < k) {
j += 2;
sb[0] = p0[0];
sb[1] = p1[0];
sb += 2;
p0 += 2 * ldx;
p1 += 2 * ldx;
}
if (j < k) {
sb[0] = p0[0];
sb += 1;
p0 += ldx;
}
}
#if PRINT_MATRIX
print_int8_matrix("sb", origin, k, n, n);
#endif
}
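// Minimal scalar reference (added for exposition; reorder_b_reference_sketch
// is not part of the original chgemm source) for the packed-B layout that
// reorder_b produces, restricted to the full-tile case where n is a multiple
// of 4 and k a multiple of 8: panels of four columns, and within each panel
// 8-deep blocks stored depth-major per column.
static void reorder_b_reference_sketch(const int8_t* b, int8_t* sb,
                                       const int k, const int n, const int ldx) {
    for (int i = 0; i < n; i += 4)          // one 4-column panel at a time
        for (int d8 = 0; d8 < k; d8 += 8)   // 8-deep blocks down the panel
            for (int c = 0; c < 4; ++c)     // columns interleaved per block
                for (int d = 0; d < 8; ++d)
                    *sb++ = b[(d8 + d) * ldx + i + c];
}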
static void reorder_a(int8_t* a, int8_t* sa, int m, const int k, const int ldx) {
#if PRINT_MATRIX
print_int8_matrix("a", a, m, k, ldx);
int8_t *origin = sa;
#endif
int i = 0;
for (; i + 3 < m; i += 4) {
int8_t *p0 = a;
int8_t *p1 = a + ldx;
int8_t *p2 = a + 2 * ldx;
int8_t *p3 = a + 3 * ldx;
int j = 0;
for (; j + 7 < k; j += 8) {
asm volatile (
"ld1 {v0.8b}, [%0], #8 \n"
"ld1 {v1.8b}, [%1], #8 \n"
"ld1 {v2.8b}, [%2], #8 \n"
"ld1 {v3.8b}, [%3], #8 \n"
"st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%4], #32\n"
: "=r"(p0),
"=r"(p1),
"=r"(p2),
"=r"(p3),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(p2),
"3"(p3),
"4"(sa)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
if (j + 3 < k) {
j += 4;
asm volatile (
"ld1 {v0.8b}, [%0] \n"
"add %0, %0, #4 \n"
"ld1 {v1.8b}, [%1] \n"
"add %1, %1, #4 \n"
"ld1 {v2.8b}, [%2] \n"
"add %2, %2, #4 \n"
"ld1 {v3.8b}, [%3] \n"
"add %3, %3, #4 \n"
"trn1 v0.2s, v0.2s, v1.2s \n"
"st1 {v0.8b}, [%4], #8 \n"
"trn1 v2.2s, v2.2s, v3.2s \n"
"st1 {v2.8b}, [%4], #8 \n"
: "=r"(p0),
"=r"(p1),
"=r"(p2),
"=r"(p3),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(p2),
"3"(p3),
"4"(sa)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
if (j + 1 < k) {
j += 2;
asm volatile (
"ld1 {v0.8b}, [%0] \n"
"add %0, %0, #2 \n"
"ld1 {v1.8b}, [%1] \n"
"add %1, %1, #2 \n"
"ld1 {v2.8b}, [%2] \n"
"add %2, %2, #2 \n"
"ld1 {v3.8b}, [%3] \n"
"add %3, %3, #2 \n"
"trn1 v0.4h, v0.4h, v1.4h \n"
"trn1 v2.4h, v2.4h, v3.4h \n"
"trn1 v0.2s, v0.2s, v2.2s \n"
"st1 {v0.8b}, [%4], #8 \n"
: "=r"(p0),
"=r"(p1),
"=r"(p2),
"=r"(p3),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(p2),
"3"(p3),
"4"(sa)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
if (j < k) {
*sa++ = *p0;
*sa++ = *p1;
*sa++ = *p2;
*sa++ = *p3;
}
a += 4 * ldx;
}
if (i + 1 < m) {
i += 2;
int8_t *p0 = a;
int8_t *p1 = a + ldx;
int j = 0;
for (; j + 7 < k; j += 8) {
asm volatile (
"ld1 {v0.8b}, [%0], #8 \n"
"ld1 {v1.8b}, [%1], #8 \n"
"st1 {v0.8b, v1.8b}, [%2], #16\n"
: "=r"(p0),
"=r"(p1),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(sa)
: "cc", "memory", "v0", "v1"
);
}
if (j + 3 < k) {
j += 4;
asm volatile (
"ld1 {v0.8b}, [%0] \n"
"add %0, %0, #4 \n"
"ld1 {v1.8b}, [%1] \n"
"add %1, %1, #4 \n"
"trn1 v0.2s, v0.2s, v1.2s \n"
"st1 {v0.8b}, [%2], #8 \n"
: "=r"(p0),
"=r"(p1),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(sa)
: "cc", "memory", "v0", "v1"
);
}
if (j + 1 < k) {
j += 2;
sa[0] = p0[0];
sa[1] = p0[1];
sa[2] = p1[0];
sa[3] = p1[1];
sa += 4;
p0 += 2;
p1 += 2;
}
if (j < k) {
sa[0] = p0[0];
sa[1] = p1[0];
sa += 2;
}
a += 2 * ldx;
}
if (i < m) {
memcpy(sa, a, sizeof(int8_t) * ldx);
}
#if PRINT_MATRIX
print_int8_matrix("sa", origin, m, k, k);
#endif
}
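// Added exposition: reorder_a packs A in 4-row panels; within a panel each
// 8-deep block stores eight consecutive elements of row 0, then rows 1-3
// (the ld1/st1 pairs above), while the k4/k2 tails use trn1 to interleave
// the short row fragments. Note the final single-row tail copies ldx bytes,
// which assumes the row stride equals k for that last row.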
void int8kernel_m1(void* dst, int8_t* sa, int8_t* sb, int /*m*/, int k, int n, int /*ldc*/, float* scales, float* bias) {
void *pc = dst;
int8_t *pa = sa;
int8_t *pb = sb;
DECOMPOSE_K
DECOMPOSE_N
if (n4 > 0) {
asm volatile(
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" mov x8, %0 // PanelA\n"
" cmp %w4, #0 \n"
" beq 1f \n"
" mov w19, %w4 \n"
" cmp %w3, #0 \n"
" beq 2f// loop number is even \n"
" // start loopm1_kd8_nd4\n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n"
" ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" saddlp v9.4s, v0.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" saddlp v10.4s, v0.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" saddlp v11.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n"
" ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [%1], #32\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v2.8b, v4.8b \n"
" smlal v0.8h, v3.8b, v12.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v2.8b, v5.8b \n"
" smlal v1.8h, v3.8b, v13.8b \n"
" sadalp v9.4s, v1.8h \n"
" smull v0.8h, v2.8b, v6.8b \n"
" smlal v0.8h, v3.8b, v14.8b \n"
" sadalp v10.4s, v0.8h \n"
" smull v1.8h, v2.8b, v7.8b \n"
" smlal v1.8h, v3.8b, v15.8b \n"
" sadalp v11.4s, v1.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w5, #0 \n"
" beq 4f \n"
" // start subkernel_m1n4k4 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" sxtl v5.8h, v5.8b \n"
" mov v6.d[0], v4.d[1] \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" add v8.4s, v8.4s, v12.4s \n"
" 4: \n"
" cmp %w6, #0 \n"
" beq 5f \n"
" // start subkernel_m1n4k2\n"
" ld1 {v4.8b}, [%0] // load A1x2 \n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" mov v4.h[1], v4.h[0] \n"
" mov v4.s[1], v4.s[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" sadalp v8.4s, v0.8h \n"
" 5: \n"
" cmp %w7, #0 \n"
" beq 6f \n"
" // start subkernel_m1n4k1 \n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A1x1\n"
" add %0, %0, #1 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" ldr w24, [%9] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" mov v12.s[0], w24 \n"
" fmul v8.4s, v8.4s, v12.s[0]\n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" dup v15.4s, w24 \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2]\n"
" add %2, %2, #4 \n"
" b 10f\n"
" 7: \n"
" st1 {v8.4s}, [%2], #16 \n"
" 10: \n"
" subs %w8, %w8, #1 \n"
" mov %0, x8 \n"
" bne 9b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even),// %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n2 > 0) {
asm volatile(
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" mov x8, %0 // PanelA\n"
" cmp %w4, #0 \n"
" beq 1f // k <= 7\n"
" mov w19, %w4\n"
" cmp %w3, #0 \n"
" beq 2f // loop number is even \n"
" // start loopmd1_kd8_nd2 \n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" saddlp v9.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v2.8b, v4.8b \n"
" smlal v0.8h, v3.8b, v6.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v2.8b, v5.8b \n"
" smlal v1.8h, v3.8b, v7.8b \n"
" sadalp v9.4s, v1.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" // start process kd4 kd2 kd1 cases \n"
" 1: \n"
" cmp %w5, 0 \n"
" beq 4f \n"
" // start subkernel_m1n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" 4: \n"
" cmp %w6, 0 \n"
" beq 5f \n"
" // start subkernel_m1n2k2 \n"
" ld1 {v4.8b}, [%0] // load A1x2\n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1] // load B2x2\n"
" add %1, %1, #4 \n"
" mov v4.h[1], v4.h[0] \n"
" smull v0.8h, v4.8b, v0.8b \n"
" saddlp v0.4s, v0.8h \n"
" add v8.4s, v8.4s, v0.4s \n"
" 5: \n"
" cmp %w7, 0 \n"
" beq 6f \n"
" // start subkernel_m1n2k1 \n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A1x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" // v12: s0 s1 \n"
" ldr w24, [%9] \n"
" mov v12.s[0], w24 \n"
" mov v12.s[1], v12.s[0] \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" mov v12.s[0], w24 \n"
" mov v12.s[1], v12.s[0] \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8:\n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2]\n"
" add %2, %2, #2 \n"
" b 10f\n"
" 7: \n"
" st1 {v8.2s}, [%2], #8 \n"
" 10: \n"
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even),// %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n1 > 0) {
asm volatile (
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" cmp %w4, #0 \n"
" beq 1f // k <= 7 \n"
" mov w19, %w4\n"
" cmp %w3, #0 \n"
" beq 2f // loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load B line \n"
" ld1 {v2.8b}, [%0], #8 // load A line \n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v25.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w5, 0 \n"
" beq 4f \n"
" // start subkernel_m1n1k4 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %1, %1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4\n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" 4: \n"
" cmp %w6, 0 \n"
" beq 5f \n"
" // start subkernel_m1n1k2 \n"
" ld1 {v4.8b}, [%0] // load A1x2\n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1] // load B2x1\n"
" add %1, %1, #2 \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" add v8.4s, v8.4s, v0.4s \n"
" 5: \n"
" cmp %w7, 0 \n"
" beq 6f \n"
" // start subkernel_m1n1k1 \n"
" ld1 {v0.8b}, [%1] // load B1x1 \n"
" add %1, %1, #1 \n"
" ld1 {v1.8b}, [%0] // load A1x1 \n"
" add %0, %0, #1 \n"
" sxtl v1.8h, v1.8b \n"
" sxtl v0.8h, v0.8b \n"
" smull v0.4s, v1.4h, v0.h[0] \n"
" add v8.4s, v8.4s, v0.4s \n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm\n"
" ldr w24, [%9] \n"
" mov v12.s[0], w24 \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" mov v12.s[0], w24 \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2]\n"
" b 10f \n"
" 7: \n"
" st1 {v8.s}[0], [%2] \n"
" 10: \n"
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even),// %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x0", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
}
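// Scalar sketch of the requantization the kernels implement with
// scvtf/fmul/fcvtas/sqxtn (added for exposition; requantize_sketch is not
// part of the original chgemm source): int32 accumulator to float, multiply
// by the per-row scale, add the optional bias, round to nearest with ties
// away from zero, then saturate to int8.
#include <math.h>
static int8_t requantize_sketch(int32_t acc, float scale, const float* bias) {
    float v = (float)acc * scale;
    if (bias) v += *bias;
    long r = lroundf(v);        // fcvtas rounds halfway cases away from zero
    if (r > 127) r = 127;       // the sqxtn pair saturates to [-128, 127]
    if (r < -128) r = -128;
    return (int8_t)r;
}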
void int8kernel_m2(void* dst, int8_t* sa, int8_t* sb, int /*m*/, int k, int n, int ldc, float* scales, float* bias) {
void *pc0, *pc1;
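// Added note: when scales is NULL the kernel emits raw int32 results and ldc
// is counted in int32 elements; otherwise the output is requantized int8 and
// ldc is counted in bytes.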
if (scales == 0) {
pc0 = (int32_t*)dst;
pc1 = ((int32_t*)pc0) + ldc;
} else {
pc0 = dst;
pc1 = ((int8_t*)pc0) + ldc;
}
int8_t *pa = sa;
int8_t *pb = sb;
DECOMPOSE_K
DECOMPOSE_N
if (n4 > 0) {
asm volatile(
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b \n"
" eor v11.16b, v11.16b, v11.16b \n"
" eor v12.16b, v12.16b, v12.16b \n"
" eor v13.16b, v13.16b, v13.16b \n"
" eor v14.16b, v14.16b, v14.16b \n"
" eor v15.16b, v15.16b, v15.16b \n"
" eor v16.16b, v16.16b, v16.16b \n"
" eor v17.16b, v17.16b, v17.16b \n"
" eor v18.16b, v18.16b, v18.16b \n"
" eor v19.16b, v19.16b, v19.16b \n"
" eor v20.16b, v20.16b, v20.16b \n"
" eor v21.16b, v21.16b, v21.16b \n"
" eor v22.16b, v22.16b, v22.16b \n"
" eor v23.16b, v23.16b, v23.16b \n"
" mov x8, %0 // PanelA \n"
" cmp %w5, #0 \n"
" beq 1f \n"
" mov w17, %w5 \n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopm2_kd8_nd4\n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v10.4s, v0.8h \n"
" saddlp v14.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v11.4s, v0.8h \n"
" saddlp v15.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" add x12, %1, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x12], #16 \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h\n"
" sadalp v13.4s, v1.8h\n"
" // start v10v11, v14v15, v18v19, v22v23, error here!\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x12], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v10.4s, v0.8h \n"
" sadalp v11.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v14.4s, v0.8h \n"
" sadalp v15.4s, v1.8h \n"
" add %1, %1, #32 \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" addp v9.4s, v12.4s, v14.4s \n"
" // start process kd4 kd2 kd1 cases \n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n4k4 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" sxtl v5.8h, v5.8b \n"
" mov v6.d[0], v4.d[1] \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" add v8.4s, v8.4s, v12.4s \n"
" smull v16.4s, v3.4h, v4.4h \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v9.4s, v9.4s, v16.4s \n"
" 4: \n"
" cmp %w7, #0 \n"
" beq 5f \n"
" // start subkernel_m2n4k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2 \n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v12.8h, v4.8b, v0.8b \n"
" smull v13.8h, v4.8b, v1.8b \n"
" smull v14.8h, v4.8b, v2.8b \n"
" smull v15.8h, v4.8b, v3.8b \n"
" saddlp v12.4s, v12.8h \n"
" saddlp v13.4s, v13.8h \n"
" saddlp v14.4s, v14.8h \n"
" saddlp v15.4s, v15.8h \n"
" mov v16.s[0], v12.s[0] \n"
" mov v16.s[1], v13.s[0] \n"
" mov v16.s[2], v14.s[0] \n"
" mov v16.s[3], v15.s[0] \n"
" mov v17.s[0], v13.s[1] \n"
" mov v17.s[1], v12.s[1] \n"
" mov v17.s[2], v15.s[1] \n"
" mov v17.s[3], v14.s[1] \n"
" add v8.4s, v8.4s, v16.4s \n"
" add v9.4s, v9.4s, v17.4s \n"
" 5: \n"
" cmp %w8, #0 \n"
" beq 6f \n"
" // start subkernel_m2n4k1 \n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A2x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" smlal v9.4s, v4.4h, v2.h[1]\n"
" 6: \n"
" cmp %10, #0 \n"
" beq 7f \n"
" ld1 {v12.2s}, [%10] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v9.4s, v9.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.s[0]\n"
" fmul v9.4s, v9.4s, v12.s[1]\n"
" cmp %11, #0 \n"
" beq 8f \n"
" // fp32 += scales_tm \n"
" ld1 {v14.2s}, [%11] \n"
" dup v15.4s, v14.s[0] \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" dup v15.4s, v14.s[1] \n"
" fadd v9.4s, v9.4s, v15.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s\n"
" fcvtas v9.4s, v9.4s\n"
" // int32 -> int16 \n"
" sqxtn v6.4h, v8.4s \n"
" sqxtn2 v6.8h, v9.4s\n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v6.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2] \n"
" add %2, %2, #4 \n"
" st1 {v8.s}[1], [%3] \n"
" add %3, %3, #4 \n"
" b 10f \n"
" 7: \n"
" st1 {v8.4s}, [%2], #16 \n"
" st1 {v9.4s}, [%3], #16 \n"
" 10: \n"
" subs %w9, %w9, #1 \n"
" mov %0, x8 \n"
" bne 9b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even),// %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(n4), // %9
"=r"(scales), // %10
"=r"(bias) // %11
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(n4),
"10"(scales),
"11"(bias)
: "cc", "memory", "x8", "w17", "x12", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n2 > 0) {
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"9: \n"
" mov x8, %0 // PanelA \n"
" cmp %w5, #0 \n"
" beq 1f \n"
" mov w17, %w5 \n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopmd2_kd8_nd2 \n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [%1], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h \n"
" sadalp v13.4s, v1.8h \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load first A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v3.4h, v4.4h \n"
" smull v14.4s, v3.4h, v6.4h \n"
" addp v13.4s, v13.4s, v14.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" 4: \n"
" cmp %w7, 0 \n"
" beq 5f \n"
" // start subkernel_m2n2k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2\n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1] // load B2x2\n"
" add %1, %1, #4 \n"
" // 00 11\n"
" rev32 v1.4h, v0.4h // 11 00\n"
" smull v21.8h, v4.8b, v0.8b \n"
" smull v22.8h, v4.8b, v1.8b \n"
" saddlp v21.4s, v21.8h \n"
" saddlp v22.4s, v22.8h \n"
" mov v9.s[0], v21.s[0] \n"
" mov v9.s[1], v22.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v22.s[1] \n"
" mov v13.s[1], v21.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" 5: \n"
" cmp %w8, #0 \n"
" beq 6f \n"
" // start subkernel_m2n2k1 \n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" smlal v12.4s, v4.4h, v2.h[1] \n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" mov v8.d[1], v12.d[0] \n"
" // v12: 0 1 \n"
" ld1 {v12.2s}, [%9] \n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" // v12: 0 0 1 1 \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.2s}, [%10] \n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" fadd v8.4s, v8.4s, v12.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2] \n"
" add %2, %2, #2 \n"
" st1 {v8.h}[1], [%3] \n"
" add %3, %3, #2 \n"
" b 10f \n"
" 7:"
" st1 {v8.2s}, [%2], #8 \n"
" st1 {v12.2s}, [%3], #8 \n"
" 10: \n"
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even),// %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "x12", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n1 > 0) {
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"9: \n"
" cmp %w5, #0 \n"
" beq 1f // k <=7\n"
" mov w17, %w5\n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v26.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v25.8b, v4.8b \n"
" smlal v1.8h, v27.8b, v5.8b \n"
" sadalp v12.4s, v1.8h \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v12.4s, v12.4s, v12.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n1k2 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %1, %1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4\n"
" ld1 {v2.8b}, [%0], #8 // load A2x4 \n"
" sxtl v2.8h, v2.8b \n"
" mov v5.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v5.4h, v4.4h \n"
" addp v13.4s, v13.4s, v13.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" 4: \n"
" cmp %w7, 0 \n"
" beq 5f \n"
" // start subkernel_m2n1k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2\n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1] // load B2x1\n"
" add %1, %1, #2 \n"
" mov v0.h[1], v0.h[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" mov v9.s[0], v0.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" 5: \n"
" cmp %w8, 0 \n"
" beq 6f \n"
" // start subkernel_m2n1k1 \n"
" ld1 {v0.8b}, [%1] // load B1x1\n"
" add %1, %1, #1 \n"
" ld1 {v1.8b}, [%0] // load A2x1\n"
" add %0, %0, #2 \n"
" sxtl v1.8h, v1.8b \n"
" sxtl v0.8h, v0.8b \n"
" smull v0.4s, v1.4h, v0.h[0]\n"
" mov v1.s[0], v0.s[1] \n"
" add v8.4s, v8.4s, v0.4s \n"
" add v12.4s, v12.4s, v1.4s \n"
" 6: \n"
" cmp %w9, #0 \n"
" beq 7f \n"
" mov v8.s[1], v12.s[0] \n"
" // v12: s0 s1 \n"
" ld1 {v12.2s}, [%9] \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.2s}, [%10] \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2] \n"
" st1 {v8.b}[1], [%3] \n"
" b 10f \n"
" 7: \n"
" st1 {v8.s}[0], [%2] \n"
" st1 {v12.s}[0], [%3] \n"
" 10: \n"
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even),// %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(scales),
"10"(bias)
: "cc", "memory", "x0", "x8", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
}
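/*
 * Scalar reference for the requantization tail shared by the kernels in this
 * file (scvtf -> fmul -> fadd -> fcvtas -> sqxtn -> sqxtn). Illustrative
 * sketch only: "requant_ref" is a name invented here, and the block is
 * compiled out so it cannot affect the build.
 */
#if 0
static inline int8_t requant_ref(int32_t acc, float scale, float bias) {
    float v = (float)acc * scale + bias;   // scvtf + fmul (+ fadd when bias is present)
    // fcvtas rounds to nearest, ties away from zero
    int32_t r = (int32_t)(v >= 0.f ? v + 0.5f : v - 0.5f);
    if (r > 127) r = 127;                  // sqxtn + sqxtn saturate to the int8 range
    if (r < -128) r = -128;
    return (int8_t)r;
}
#endif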
void int8kernel_m4(void* dst, int8_t* sa, int8_t* sb, int /* m, unused */, int k, int n, int ldc, float* scales, float* bias) {
void *pc0, *pc1, *pc2, *pc3;
if (scales == 0) {
pc0 = (int32_t*)dst;
pc1 = ((int32_t*)pc0) + ldc;
pc2 = ((int32_t*)pc1) + ldc;
pc3 = ((int32_t*)pc2) + ldc;
} else {
pc0 = dst;
pc1 = ((int8_t*)pc0) + ldc;
pc2 = ((int8_t*)pc1) + ldc;
pc3 = ((int8_t*)pc2) + ldc;
}
int8_t *pa = sa;
int8_t *pb = sb;
DECOMPOSE_K
DECOMPOSE_N
if (n4 > 0) {
asm volatile(
"8: \n"
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
" mov x8, %0 \n"
" cmp %w7, #0 \n"
" beq 1f \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 2f \n"
" subs w20, w20, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v10.4s, v0.8h \n"
" saddlp v14.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v11.4s, v0.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" saddlp v15.4s, v1.8h \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v17.4s, v0.8h \n"
" saddlp v21.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v18.4s, v0.8h \n"
" saddlp v22.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v19.4s, v0.8h \n"
" saddlp v23.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 3f \n"
" 2: \n"
" add x15, %x1, #32 \n"
" add x14, %x0, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16\n"
" ld1 {v2.8b, v3.8b}, [%0], #16\n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v24.8b\n"
" smlal v1.8h, v7.8b, v24.8b\n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h\n"
" sadalp v13.4s, v1.8h\n"
" // finish v8v9 v12v13, start proc v16v17,v20v21\n"
" ld1 {v28.8b, v29.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v28.8b \n"
" smull v1.8h, v5.8b, v28.8b \n"
" ld1 {v26.8b, v27.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v26.8b \n"
" smlal v1.8h, v7.8b, v26.8b \n"
" sadalp v16.4s, v0.8h \n"
" sadalp v17.4s, v1.8h \n"
" smull v0.8h, v4.8b, v29.8b \n"
" smull v1.8h, v5.8b, v29.8b \n"
" smlal v0.8h, v6.8b, v27.8b \n"
" smlal v1.8h, v7.8b, v27.8b \n"
" sadalp v20.4s, v0.8h \n"
" sadalp v21.4s, v1.8h \n"
" // start v10v11, v14v15, v18v19, v22v23\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v10.4s, v0.8h \n"
" sadalp v11.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v14.4s, v0.8h \n"
" sadalp v15.4s, v1.8h \n"
" smull v0.8h, v4.8b, v28.8b \n"
" smull v1.8h, v5.8b, v28.8b \n"
" smlal v0.8h, v6.8b, v26.8b \n"
" smlal v1.8h, v7.8b, v26.8b \n"
" sadalp v18.4s, v0.8h \n"
" sadalp v19.4s, v1.8h \n"
" smull v0.8h, v4.8b, v29.8b \n"
" smull v1.8h, v5.8b, v29.8b \n"
" smlal v0.8h, v6.8b, v27.8b \n"
" smlal v1.8h, v7.8b, v27.8b \n"
" sadalp v22.4s, v0.8h \n"
" sadalp v23.4s, v1.8h \n"
" add %0, %0, #32 \n"
" add %1, %1, #32 \n"
" subs w20, w20, #2 \n"
" bne 2b \n"
// label 3: horizontal reduction of the accumulators into v8..v11
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v20.4s, v20.4s, v21.4s\n"
" addp v22.4s, v22.4s, v23.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" addp v9.4s, v12.4s, v14.4s \n"
" addp v10.4s, v16.4s, v18.4s\n"
" addp v11.4s, v20.4s, v22.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w8, #0 \n"
" beq 4f \n"
" // start subkernel_m4n4k4\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" sxtl v5.8h, v5.8b \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" smull v16.4s, v3.4h, v4.4h \n"
" add v8.4s, v8.4s, v12.4s \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v9.4s, v9.4s, v16.4s \n"
" ld1 {v2.8b}, [%0], #8 // load next A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" smull v16.4s, v3.4h, v4.4h \n"
" add v10.4s, v10.4s, v12.4s \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v11.4s, v11.4s, v16.4s \n"
" 4: \n"
" cmp %w9, #0 \n"
" beq 5f \n"
" // start subkernel_m4n4k2 \n"
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" ld1 {v4.8b}, [%0], #8 // load A4x2 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v12.8h, v4.8b, v0.8b \n"
" smull v13.8h, v4.8b, v1.8b \n"
" saddlp v12.4s, v12.8h \n"
" smull v14.8h, v4.8b, v2.8b \n"
" saddlp v13.4s, v13.8h \n"
" smull v15.8h, v4.8b, v3.8b \n"
" saddlp v14.4s, v14.8h \n"
" saddlp v15.4s, v15.8h \n"
" mov v16.s[0], v12.s[0] \n"
" mov v16.s[1], v13.s[0] \n"
" mov v16.s[2], v14.s[0] \n"
" mov v16.s[3], v15.s[0] \n"
" mov v17.s[0], v13.s[1] \n"
" mov v17.s[1], v12.s[1] \n"
" mov v17.s[2], v15.s[1] \n"
" mov v17.s[3], v14.s[1] \n"
" mov v18.s[0], v14.s[2] \n"
" mov v18.s[1], v15.s[2] \n"
" mov v18.s[2], v12.s[2] \n"
" mov v18.s[3], v13.s[2] \n"
" mov v19.s[0], v15.s[3] \n"
" mov v19.s[1], v14.s[3] \n"
" mov v19.s[2], v13.s[3] \n"
" mov v19.s[3], v12.s[3] \n"
" add v8.4s, v8.4s, v16.4s \n"
" add v9.4s, v9.4s, v17.4s \n"
" add v10.4s, v10.4s, v18.4s \n"
" add v11.4s, v11.4s, v19.4s \n"
" 5: \n"
" cmp %w10, #0 \n"
" beq 6f \n"
" // start subkernel_m4n4k1\n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0] \n"
" smlal v9.4s, v4.4h, v2.h[1] \n"
" smlal v10.4s, v4.4h, v2.h[2] \n"
" smlal v11.4s, v4.4h, v2.h[3] \n"
" 6: \n"
" cmp %12, #0 \n"
" beq 9f \n"
" ld1 {v12.4s}, [%12] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v9.4s, v9.4s \n"
" scvtf v10.4s, v10.4s \n"
" scvtf v11.4s, v11.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.s[0] \n"
" fmul v9.4s, v9.4s, v12.s[1] \n"
" fmul v10.4s, v10.4s, v12.s[2] \n"
" fmul v11.4s, v11.4s, v12.s[3] \n"
" cmp %13, #0 \n"
" beq 7f \n"
" ld1 {v14.4s}, [%13] \n"
" dup v15.4s, v14.s[0] \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" dup v15.4s, v14.s[1] \n"
" fadd v9.4s, v9.4s, v15.4s \n"
" dup v15.4s, v14.s[2] \n"
" fadd v10.4s, v10.4s, v15.4s\n"
" dup v15.4s, v14.s[3] \n"
" fadd v11.4s, v11.4s, v15.4s\n"
" 7: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" fcvtas v9.4s, v9.4s \n"
" fcvtas v10.4s, v10.4s \n"
" fcvtas v11.4s, v11.4s \n"
" // int32 -> int16 \n"
" sqxtn v6.4h, v8.4s \n"
" sqxtn2 v6.8h, v9.4s \n"
" sqxtn v7.4h, v10.4s \n"
" sqxtn2 v7.8h, v11.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v6.8h \n"
" sqxtn v9.8b, v7.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2] \n"
" add %x2, %x2, #4 \n"
" st1 {v8.s}[1], [%3] \n"
" add %x3, %x3, #4 \n"
" st1 {v9.s}[0], [%4] \n"
" add %x4, %x4, #4 \n"
" st1 {v9.s}[1], [%5] \n"
" add %x5, %x5, #4 \n"
" b 10f \n"
" 9: \n"
" st1 {v8.4s}, [%x2], #16 \n"
" st1 {v9.4s}, [%x3], #16 \n"
" st1 {v10.4s}, [%x4], #16 \n"
" st1 {v11.4s}, [%x5], #16 \n"
" 10: \n"
" subs %x11, %x11, #1 \n"
" mov %x0, x8 \n"
" bne 8b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even),// %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(n4), // %11
"=r"(scales), // %12
"=r"(bias) // %13
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(n4),
"12"(scales),
"13"(bias)
: "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n2 > 0) {
asm volatile(
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
"9: \n"
" mov x8, %x0 // PanelA \n"
" cmp %w7, #0 \n"
" beq 1f // k <= 7 \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 2f// loop number is even \n"
" // start loopkd8_nd2 \n"
" subs w20, w20, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v17.4s, v0.8h \n"
" saddlp v21.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 3f \n"
" 2: \n"
" add x15, %1, #16 \n"
" add x14, %0, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [x14], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h \n"
" sadalp v9.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h \n"
" sadalp v13.4s, v1.8h \n"
" // finish v8v9 v12v13, start proc v16v17,v20v21\n"
" ld1 {v28.8b, v29.8b}, [%0], #16\n"
" smull v0.8h, v4.8b, v28.8b\n"
" smull v1.8h, v5.8b, v28.8b\n"
" ld1 {v26.8b, v27.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v26.8b\n"
" smlal v1.8h, v7.8b, v26.8b\n"
" sadalp v16.4s, v0.8h\n"
" sadalp v17.4s, v1.8h\n"
" smull v0.8h, v4.8b, v29.8b\n"
" smull v1.8h, v5.8b, v29.8b\n"
" smlal v0.8h, v6.8b, v27.8b\n"
" smlal v1.8h, v7.8b, v27.8b\n"
" sadalp v20.4s, v0.8h\n"
" sadalp v21.4s, v1.8h\n"
" add %0, %0, #32 \n"
" add %1, %1, #16 \n"
" subs w20, w20, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v20.4s, v20.4s, v21.4s\n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w8, 0 \n"
" beq 4f \n"
" // start subkernel_m4n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load first A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v3.4h, v4.4h \n"
" smull v14.4s, v3.4h, v6.4h \n"
" addp v13.4s, v13.4s, v14.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" ld1 {v2.8b}, [%0], #8 // load next A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v17.4s, v2.4h, v4.4h \n"
" smull v18.4s, v2.4h, v6.4h \n"
" addp v17.4s, v17.4s, v18.4s\n"
" addp v17.4s, v17.4s, v17.4s\n"
" add v16.4s, v16.4s, v17.4s \n"
" smull v21.4s, v3.4h, v4.4h \n"
" smull v22.4s, v3.4h, v6.4h \n"
" addp v21.4s, v21.4s, v22.4s\n"
" addp v21.4s, v21.4s, v21.4s\n"
" add v20.4s, v20.4s, v21.4s \n"
" 4: \n"
" cmp %w9, 0 \n"
" beq 5f \n"
" // start subkernel_m4n2k2 \n"
" ld1 {v4.8b}, [%0], #8 //load A4x2\n"
" ld1 {v0.8b}, [%1] // load B2x2 \n"
" add %1, %1, #4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v21.8h, v4.8b, v0.8b \n"
" smull v22.8h, v4.8b, v1.8b \n"
" smull v23.8h, v4.8b, v2.8b \n"
" smull v24.8h, v4.8b, v3.8b \n"
" saddlp v21.4s, v21.8h \n"
" saddlp v22.4s, v22.8h \n"
" saddlp v23.4s, v23.8h \n"
" saddlp v24.4s, v24.8h \n"
" mov v9.s[0], v21.s[0] \n"
" mov v9.s[1], v22.s[0] \n"
" add v8.4s, v8.4s, v9.4s\n"
" mov v13.s[0], v22.s[1] \n"
" mov v13.s[1], v21.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v23.s[2] \n"
" mov v17.s[1], v24.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v24.s[3] \n"
" mov v21.s[1], v23.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 5: \n"
" cmp %w10, 0 \n"
" beq 6f \n"
" // start subkernel_m4n2k1\n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0] \n"
" smlal v12.4s, v4.4h, v2.h[1] \n"
" smlal v16.4s, v4.4h, v2.h[2] \n"
" smlal v20.4s, v4.4h, v2.h[3] \n"
" 6: \n"
" cmp %11, #0 \n"
" beq 7f \n"
" mov v8.d[1], v12.d[0] \n"
" mov v16.d[1], v20.d[0] \n"
" // v12: 0 1 2 3 \n"
" ld1 {v12.4s}, [%11] \n"
" zip2 v13.4s, v12.4s, v12.4s \n"
" zip1 v12.4s, v12.4s, v12.4s \n"
" // v12: 0 0 1 1 \n"
" // v13: 2 2 3 3 \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v16.4s, v16.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" fmul v16.4s, v16.4s, v13.4s\n"
" cmp %12, #0 \n"
" beq 8f // skip add scales \n"
" // fp32 += scales_tm \n"
" ld1 {v12.4s}, [%12] \n"
" zip2 v13.4s, v12.4s, v12.4s\n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" fadd v8.4s, v8.4s, v12.4s \n"
" fadd v16.4s, v16.4s, v13.4s\n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" fcvtas v16.4s, v16.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" sqxtn v16.4h, v16.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" sqxtn v16.8b, v16.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2] \n"
" add %2, %2, #2 \n"
" st1 {v8.h}[1], [%3] \n"
" add %3, %3, #2 \n"
" st1 {v16.h}[0], [%4] \n"
" add %4, %4, #2 \n"
" st1 {v16.h}[1], [%5] \n"
" add %5, %5, #2 \n"
" b 10f \n"
" 7: \n"
" st1 {v8.2s}, [%2], #8 \n"
" st1 {v12.2s}, [%3], #8 \n"
" st1 {v16.2s}, [%4], #8 \n"
" st1 {v20.2s}, [%5], #8 \n"
" 10: \n"
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even),// %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(scales), // %11
"=r"(bias) // %12
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(scales),
"12"(bias)
: "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n1 > 0) {
asm volatile(
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
"1: \n"
" cmp %w7, #0 \n"
" beq 10f \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 11f// loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w20, w20, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 12f \n"
" 11: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n"
" ld1 {v28.8b, v29.8b, v30.8b, v31.8b}, [%0], #32\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v28.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v25.8b, v4.8b \n"
" smlal v1.8h, v29.8b, v5.8b \n"
" sadalp v12.4s, v1.8h \n"
" smull v0.8h, v26.8b, v4.8b \n"
" smlal v0.8h, v30.8b, v5.8b \n"
" sadalp v16.4s, v0.8h \n"
" smull v1.8h, v27.8b, v4.8b \n"
" smlal v1.8h, v31.8b, v5.8b \n"
" sadalp v20.4s, v1.8h \n"
" subs w20, w20, #2 \n"
" bne 11b \n"
" 12: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 10: \n"
" cmp %w8, #0 \n"
" beq 13f \n"
" // start subkernel_m4n1k2 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %x1, %x1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load A4x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v5.d[0], v2.d[1] \n"
" sxtl v3.8h, v3.8b \n"
" mov v6.d[0], v3.d[1] // extend A4x4 to v2,v5,v3,v6\n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v5.4h, v4.4h \n"
" addp v13.4s, v13.4s, v13.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" smull v17.4s, v3.4h, v4.4h \n"
" addp v17.4s, v17.4s, v17.4s\n"
" addp v17.4s, v17.4s, v17.4s\n"
" add v16.4s, v16.4s, v17.4s \n"
" smull v21.4s, v6.4h, v4.4h \n"
" addp v21.4s, v21.4s, v21.4s\n"
" addp v21.4s, v21.4s, v21.4s\n"
" add v20.4s, v20.4s, v21.4s \n"
" 13: \n"
" cmp %w9, #0 \n"
" beq 14f \n"
" // start subkernel_m4n1k2 \n"
" ld1 {v4.8b}, [%0], #8 // load A4x2 \n"
" ld1 {v0.8b}, [%1] // load B2x1 \n"
" add %1, %1, #2 \n"
" mov v0.h[1], v0.h[0] \n"
" mov v0.s[1], v0.s[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" mov v9.s[0], v0.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v0.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v0.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 14: \n"
" cmp %w10, #0 \n"
" beq 15f \n"
" // start subkernel_m4n1k1 \n"
" ld1 {v4.8b}, [%1] // load B1x1\n"
" add %1, %1, #1 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smull v0.4s, v2.4h, v4.h[0]\n"
" add v8.4s, v8.4s, v0.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v0.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v0.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 15: \n"
// REQUANT
" cmp %11, #0 \n"
" beq 16f \n"
" mov v8.s[1], v12.s[0] \n"
" mov v8.s[2], v16.s[0] \n"
" mov v8.s[3], v20.s[0] \n"
" // v12: s0 s1 s2 s3 \n"
" ld1 {v12.4s}, [%11] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" cmp %12, #0 \n"
" beq 17f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.4s}, [%12] \n"
" fadd v8.4s, v8.4s, v12.4s \n"
" 17: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2] \n"
" st1 {v8.b}[1], [%3] \n"
" st1 {v8.b}[2], [%4] \n"
" st1 {v8.b}[3], [%5] \n"
" b 2f \n"
" // no need to add the last output pointer\n"
" 16: \n"
" st1 {v8.s}[0], [%2] \n"
" st1 {v12.s}[0], [%3] \n"
" st1 {v16.s}[0], [%4] \n"
" st1 {v20.s}[0], [%5] \n"
" 2: \n"
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even),// %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(scales), // %11
"=r"(bias) // %12
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(scales),
"12"(bias)
: "cc", "memory", "x0", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
}
#undef DECOMPOSE_K
#undef DECOMPOSE_N
void int8kernel(void* dst, const int8_t* sa, const int8_t* sb, int m, int k, int n, int ldc, float* scales, float* bias, const Option& opt) {
int8_t* pa = (int8_t*)sa;
int8_t* pb = (int8_t*)sb;
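// nn = largest multiple of 4 not exceeding m: those rows go through the
// 4-row kernel; the 0-3 leftover rows fall through to the m2/m1 kernels below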
const int nn = (m >> 2) << 2;
if (scales == 0) {
int32_t* pc = (int32_t*)dst;
#if PRINT_MATRIX
int32_t* origin = pc;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < nn; i += 4) {
int8kernel_m4((void*)(pc + i * ldc), pa + i * k, pb, m, k, n, ldc, 0, 0);
}
pa += nn * k;
pc += nn * ldc;
switch(m-nn)
{
case 3:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, 0, 0);
pc += 2 * ldc;
pa += 2 * k;
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, 0, 0);
break;
case 2:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, 0, 0);
break;
case 1:
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, 0, 0);
break;
case 0:
default:
break;
}
#if PRINT_MATRIX
print_int32_matrix("pc", origin, m, n, ldc);
#endif
} else {
int8_t* pc = (int8_t*)dst;
#if PRINT_MATRIX
print_fp32_vec("scales", scales, m);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < nn; i += 4) {
int8kernel_m4((void*)(pc + i * ldc), pa + i * k, pb, m, k, n, ldc, scales + i, (bias==0)? 0: bias+i);
}
pa += nn * k;
pc += nn * ldc;
scales += nn;
bias = (bias == 0)? 0: bias + nn;
switch(m-nn)
{
case 3:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
pc += 2 * ldc;
pa += 2 * k;
scales += 2;
bias = (bias == 0)? 0: bias + 2;
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
break;
case 2:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
break;
case 1:
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
break;
case 0:
default:
break;
}
}
return;
}
#ifdef PRINT_MATRIX
#undef PRINT_MATRIX
#endif
#endif
|
median.c | /*
* File: median.c
*
* MATLAB Coder version : 3.0
* C/C++ source code generated on : 15-Nov-2015 19:51:15
*/
/* Include Files */
#include "rt_nonfinite.h"
#include "yaapt.h"
#include "median.h"
#include "yaapt_emxutil.h"
/* Function Definitions */
/*
* Arguments : const emxArray_real_T *x
* emxArray_real_T *y
* Return Type : void
*/
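/*
 * Note: each column of x holds 5 values; the loop below merge-sorts the five
 * column indices (NaNs ordered last) and returns the middle sorted element
 * as the median, or NaN if the column contains any NaN.
 */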
void median(const emxArray_real_T *x, emxArray_real_T *y)
{
int ub_loop;
int i;
int b_i;
int idx[5];
int c_i;
int iwork[5];
int k;
boolean_T p;
int i2;
int j;
int pEnd;
int b_p;
int q;
int qEnd;
int kEnd;
double m;
ub_loop = y->size[0] * y->size[1];
y->size[0] = 1;
y->size[1] = x->size[1];
emxEnsureCapacity((emxArray__common *)y, ub_loop, (int)sizeof(double));
ub_loop = x->size[1];
#pragma omp parallel for \
num_threads(omp_get_max_threads()) \
private(b_i,c_i,k,p,i2,j,pEnd,b_p,q,qEnd,kEnd,m) \
firstprivate(idx,iwork)
for (i = 1; i <= ub_loop; i++) {
b_i = i;
for (c_i = 0; c_i < 5; c_i++) {
idx[c_i] = 0;
}
for (k = 0; k <= 2; k += 2) {
if ((x->data[k + x->size[0] * (b_i - 1)] <= x->data[(k + x->size[0] * (b_i
- 1)) + 1]) || rtIsNaN(x->data[(k + x->size[0] * (b_i - 1)) + 1]))
{
p = true;
} else {
p = false;
}
if (p) {
idx[k] = k + 1;
idx[k + 1] = k + 2;
} else {
idx[k] = k + 2;
idx[k + 1] = k + 1;
}
}
idx[4] = 5;
c_i = 2;
while (c_i < 5) {
i2 = c_i << 1;
j = 1;
for (pEnd = 1 + c_i; pEnd < 6; pEnd = qEnd + c_i) {
b_p = j;
q = pEnd - 1;
qEnd = j + i2;
if (qEnd > 6) {
qEnd = 6;
}
k = 0;
kEnd = qEnd - j;
while (k + 1 <= kEnd) {
if ((x->data[(idx[b_p - 1] + x->size[0] * (b_i - 1)) - 1] <= x->data
[(idx[q] + x->size[0] * (b_i - 1)) - 1]) || rtIsNaN(x->data
[(idx[q] + x->size[0] * (b_i - 1)) - 1])) {
p = true;
} else {
p = false;
}
if (p) {
iwork[k] = idx[b_p - 1];
b_p++;
if (b_p == pEnd) {
while (q + 1 < qEnd) {
k++;
iwork[k] = idx[q];
q++;
}
}
} else {
iwork[k] = idx[q];
q++;
if (q + 1 == qEnd) {
while (b_p < pEnd) {
k++;
iwork[k] = idx[b_p - 1];
b_p++;
}
}
}
k++;
}
for (k = 0; k + 1 <= kEnd; k++) {
idx[(j + k) - 1] = iwork[k];
}
j = qEnd;
}
c_i = i2;
}
if (rtIsNaN(x->data[(idx[4] + x->size[0] * (b_i - 1)) - 1])) {
m = x->data[(idx[4] + x->size[0] * (b_i - 1)) - 1];
} else {
m = x->data[(idx[2] + x->size[0] * (b_i - 1)) - 1];
}
y->data[b_i - 1] = m;
}
}
/*
* File trailer for median.c
*
* [EOF]
*/
|
ab-totient-omp-9.c | // Distributed and parallel technologies, Andrew Beveridge, 03/03/2014
// To Compile: gcc -Wall -O -o ab-totient-omp -fopenmp ab-totient-omp.c
// To Run / Time: /usr/bin/time -v ./ab-totient-omp range_start range_end
#include <stdio.h>
#include <omp.h>
/* If n is a positive integer, then φ(n) is the number of integers k in the range 1 ≤ k ≤ n for which gcd(n, k) = 1.
When n is prime, φ(n) = n - 1; φ(n) is even for every n > 2. */
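// Worked example of the update used below: for n = 9 the loop finds the prime
// factor 3, so result -= result / 3 yields φ(9) = 9 * (1 - 1/3) = 6.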
long getTotient (long number) {
long result = number;
// Check every prime number below the square root for divisibility
if(number % 2 == 0){
result -= result / 2;
do
number /= 2;
while(number % 2 == 0);
}
// Primitive replacement for a list of primes, looping through every odd number
long prime;
for(prime = 3; prime * prime <= number; prime += 2){
if(number % prime == 0){
result -= result / prime;
do
number /= prime;
while(number % prime == 0);
}
}
// Last common factor
if(number > 1)
result -= result / number;
// Return the result.
return result;
}
// Main method.
int main(int argc, char ** argv) {
// Load inputs
long lower, upper;
sscanf(argv[1], "%ld", &lower);
sscanf(argv[2], "%ld", &upper);
long i;
long result = 0;
// We know the answer if it's 1; no need to execute the function
if(lower == 1) {
result = 1;
lower = 2;
}
#pragma omp parallel for default(shared) private(i) schedule(auto) reduction(+:result) num_threads(9)
// Sum all totients in the specified range
for (i = lower; i <= upper; i++) {
result = result + getTotient(i);
}
// Print the result
printf("Sum of Totients between [%ld..%ld] is %ld \n", lower, upper, result);
// A-OK!
return 0;
}
|
GB_binop__plus_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fc64)
// A*D function (colscale): GB (_AxD__plus_fc64)
// D*A function (rowscale): GB (_DxB__plus_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fc64)
// C=scalar+B GB (_bind1st__plus_fc64)
// C=scalar+B' GB (_bind1st_tran__plus_fc64)
// C=A+scalar GB (_bind2nd__plus_fc64)
// C=A'+scalar GB (_bind2nd_tran__plus_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_add (aij, bij)
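// Note: GxB_FC64_t is the double-precision complex type (C99 double complex
// on most platforms), and GB_FC64_add performs ordinary complex addition.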
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_add (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FC64 || GxB_NO_PLUS_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__plus_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__plus_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__plus_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__plus_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__plus_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC64_add (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__plus_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC64_add (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_add (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__plus_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_add (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__plus_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mass_sum.c | #include "mass_sum.h"
#define REAL_CELL 1
double mass_sum(int ncells, int* restrict celltype,
double* restrict H, double* restrict dx, double* restrict dy){
double summer = 0.0;
#pragma omp simd reduction(+:summer)
for (int ic=0; ic<ncells ; ic++) {
if (celltype[ic] == REAL_CELL) {
summer += H[ic]*dx[ic]*dy[ic];
}
}
return(summer);
}
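/*
 * Usage sketch (hypothetical values, not part of this file): only cells
 * flagged REAL_CELL contribute H*dx*dy to the sum.
 *
 *   int type[3]  = {REAL_CELL, 0, REAL_CELL};
 *   double H[3]  = {1.0, 5.0, 2.0};
 *   double dx[3] = {1.0, 1.0, 1.0};
 *   double dy[3] = {2.0, 2.0, 2.0};
 *   double total = mass_sum(3, type, H, dx, dy);   // 1*1*2 + 2*1*2 = 6.0
 */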
|
FGT_fmt_plug.c | /*
* Fortigate (FortiOS) Password cracker
*
* This software is Copyright (c) 2012 Mat G. <mat.jtr at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* Passwords are located in "config system admin" part of the configuration file :
*
* config system admin
* edit "<username>"
* set password ENC AK1wTiFOMv7mZOTvQNmKQBAY98hZZjSRLxAY8vZp8NlDWU=
*
* Password is : AK1|base64encode(salt|hashed_password)
* where hashed_password is SHA1(salt|password|fortinet_magic)
*
* salt is 12 bytes long
* hashed_password is 20 bytes long (SHA1 salt)
* encoded password is 47 bytes long (3 bytes for AK1 and 44 bytes of base64encode(salt|hashed_password))
*
*/
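/*
 * Illustrative sketch of the scheme above, assuming OpenSSL's one-shot SHA1()
 * and a password of at most PLAINTEXT_LENGTH (32) bytes; "fgt_check" is a
 * name invented here, the base64 step is presumed already done, and
 * FORTINET_MAGIC is the constant defined further below. Compiled out so it
 * cannot affect the plugin build.
 */
#if 0
#include <string.h>
#include <openssl/sha.h>
/* decoded = base64decode(hash + 3): 12-byte salt followed by 20-byte digest */
static int fgt_check(const unsigned char *decoded, const char *password)
{
	unsigned char buf[12 + 32 + 24], md[SHA_DIGEST_LENGTH];
	size_t plen = strlen(password);
	memcpy(buf, decoded, 12);                         /* salt */
	memcpy(buf + 12, password, plen);                 /* candidate password */
	memcpy(buf + 12 + plen, FORTINET_MAGIC, FORTINET_MAGIC_LENGTH);
	SHA1(buf, 12 + plen + FORTINET_MAGIC_LENGTH, md);
	return !memcmp(md, decoded + 12, SHA_DIGEST_LENGTH);
}
#endif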
#if FMT_EXTERNS_H
extern struct fmt_main fmt_FGT;
#elif FMT_REGISTERS_H
john_register_one(&fmt_FGT);
#else
#include <string.h>
#include "common.h"
#include "formats.h"
#include "misc.h"
#include "sha.h"
#include "base64.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 32768 // tuned on AMD K8 dual-HT (XOP)
#endif
#endif // __MIC__
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Fortigate"
#define FORMAT_NAME "FortiOS"
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH 44
#define HASH_LENGTH (CIPHERTEXT_LENGTH + 3)
#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE 12
#define SALT_ALIGN 4
#define FORTINET_MAGIC "\xa3\x88\xba\x2e\x42\x4c\xb0\x4a\x53\x79\x30\xc1\x31\x07\xcc\x3f\xa1\x32\x90\x29\xa9\x81\x5b\x70"
#define FORTINET_MAGIC_LENGTH 24
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests fgt_tests[] =
{
{"AK1wTiFOMv7mZOTvQNmKQBAY98hZZjSRLxAY8vZp8NlDWU=", "fortigate"},
{"AK1Vd1SCGVtAAT931II/U22WTppAISQkITHOlz0ukIg4nA=", "admin"},
{"AK1DZLDpqz335ElPtuiNTpguiozY7xVaHjHYnxw6sNlI6A=", "ftnt"},
{NULL}
};
static SHA_CTX ctx_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_key_len;
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
saved_key_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key_len));
}
static void done(void)
{
MEM_FREE(saved_key_len);
MEM_FREE(crypt_key);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
if (strncmp(ciphertext, "AK1", 3))
return 0;
if (strlen(ciphertext) != HASH_LENGTH)
return 0;
return 1;
}
static void * get_salt(char *ciphertext)
{
static union {
char b[SALT_SIZE];
ARCH_WORD_32 dummy;
} out;
char buf[SALT_SIZE+BINARY_SIZE+1];
base64_decode(ciphertext+3, CIPHERTEXT_LENGTH, buf);
memcpy(out.b, buf, SALT_SIZE);
return out.b;
}
static void set_salt(void *salt)
{
SHA1_Init(&ctx_salt);
SHA1_Update(&ctx_salt, salt, SALT_SIZE);
}
static void set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
saved_key_len[index] = strlen(key);
}
static char * get_key(int index)
{
return saved_key[index];
}
static void * get_binary(char *ciphertext)
{
static union {
char b[BINARY_SIZE];
ARCH_WORD_32 dummy;
} bin;
char buf[SALT_SIZE+BINARY_SIZE+1];
memset(buf, 0, sizeof(buf));
base64_decode(ciphertext+3, CIPHERTEXT_LENGTH, buf);
// skip over the 12 bytes of salt and get only the hashed password
memcpy(bin.b, buf+SALT_SIZE, BINARY_SIZE);
return bin.b;
}
static int cmp_all(void *binary, int count)
{
ARCH_WORD_32 b0 = *(ARCH_WORD_32 *)binary;
int i;
for (i = 0; i < count; i++) {
if (b0 != *(ARCH_WORD_32 *)crypt_key[i])
continue;
if (!memcmp(binary, crypt_key[i], BINARY_SIZE))
return 1;
}
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int i=0;
char *cp=FORTINET_MAGIC;
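/* ctx_salt already holds the SHA1 state after hashing the 12-byte salt
   (see set_salt); each candidate only appends password + magic to a copy */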
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(ctx_salt, count, saved_key, saved_key_len, crypt_key, cp)
#endif
#if defined (_OPENMP) || MAX_KEYS_PER_CRYPT>1
for (i = 0; i < count; i++)
#endif
{
SHA_CTX ctx;
memcpy(&ctx, &ctx_salt, sizeof(ctx));
SHA1_Update(&ctx, saved_key[i], saved_key_len[i]);
SHA1_Update(&ctx, cp, FORTINET_MAGIC_LENGTH);
SHA1_Final((unsigned char*)crypt_key[i], &ctx);
}
return count;
}
static int get_hash_0(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & PH_MASK_6; }
static int salt_hash(void *salt)
{
ARCH_WORD_32 mysalt = *(ARCH_WORD_32 *)salt;
return mysalt & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_FGT = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP ,
{ NULL },
{ NULL },
fgt_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
ft.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - FT
This benchmark is an OpenMP C version of the NPB FT code.
The OpenMP C versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. The 3.0 translation was performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: D. Bailey
W. Saphir
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
#include "../math/nas_math.h"
#include "../paging_benchmark.h"
/* global variables */
#include "global.h"
#include <nautilus/nautilus.h>
#include <nautilus/shell.h>
/* function declarations */
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX],
int t, int indexmap[NZ][NY][NX], int d[3]);
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]);
static void ipow46(double a, int exponent, double *result);
static void setup(void);
static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]);
static void print_timers(void);
static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]);
static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void fft_init (int n);
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]);
static void fftz2 (int is, int l, int m, int n, int ny, int ny1,
dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]);
static int ilog2(int n);
static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]);
static void verify (int d1, int d2, int d3, int nt,
boolean *verified, char *class);
/*--------------------------------------------------------------------
c FT benchmark
c-------------------------------------------------------------------*/
static int program_FT(char *_buf, void* _priv);
static struct shell_cmd_impl nas_ft_impl = {
.cmd = "nas-ft",
.help_str = "NAS parallel benchmark FT",
.handler = program_FT,
};
nk_register_shell_cmd(nas_ft_impl);
#ifdef NAUT_CONFIG_ASPACE_PAGING
int program_FT_paging(char * _buf, void *_priv){
return paging_wrapper(_buf, _priv, &program_FT);
}
static struct shell_cmd_impl nas_ft_paging_impl = {
.cmd = "nas-ft-paging",
.help_str = "NAS parallel benchmark FT with paging",
.handler = program_FT_paging,
};
nk_register_shell_cmd(nas_ft_paging_impl);
#endif
int program_FT(char * _buf, void *_priv) {
/*c-------------------------------------------------------------------
c-------------------------------------------------------------------*/
int i, ierr;
/*------------------------------------------------------------------
c u0, u1, u2 are the main arrays in the problem.
c Depending on the decomposition, these arrays will have different
c dimensions. To accommodate all possibilities, we allocate them as
c one-dimensional arrays and pass them to subroutines for different
c views
c - u0 contains the (transformed) initial condition
c - u1 and u2 are working arrays
c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the
c time evolution operator.
c-----------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Large arrays are in common so that they are allocated on the
c heap rather than the stack. This common block is not
c referenced directly anywhere else. Padding is to avoid accidental
c cache problems, since all array sizes are powers of two.
c-------------------------------------------------------------------*/
static dcomplex u0[NZ][NY][NX];
static dcomplex pad1[3];
static dcomplex u1[NZ][NY][NX];
static dcomplex pad2[3];
static dcomplex u2[NZ][NY][NX];
static dcomplex pad3[3];
static int indexmap[NZ][NY][NX];
int iter;
int nthreads = 1;
double total_time, mflops;
boolean verified;
char class;
/*--------------------------------------------------------------------
c Run the entire problem once to make sure all data is touched.
c This reduces variable startup costs, which is important for such a
c short benchmark. The other NPB 2 implementations are similar.
c-------------------------------------------------------------------*/
for (i = 0; i < T_MAX; i++) {
timer_clear(i);
}
setup();
compute_indexmap(indexmap, dims[2]);
compute_initial_conditions(u1, dims[0]);
fft_init (dims[0][0]);
fft(1, u1, u0);
/*--------------------------------------------------------------------
c Start over from the beginning. Note that all operations must
c be timed, in contrast to other benchmarks.
c-------------------------------------------------------------------*/
for (i = 0; i < T_MAX; i++) {
timer_clear(i);
}
timer_start(T_TOTAL);
if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP);
compute_indexmap(indexmap, dims[2]);
compute_initial_conditions(u1, dims[0]);
fft_init (dims[0][0]);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_SETUP);
}
if (TIMERS_ENABLED == TRUE) {
timer_start(T_FFT);
}
fft(1, u1, u0);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_FFT);
}
for (iter = 1; iter <= niter; iter++) {
if (TIMERS_ENABLED == TRUE) {
timer_start(T_EVOLVE);
}
evolve(u0, u1, iter, indexmap, dims[0]);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_EVOLVE);
}
if (TIMERS_ENABLED == TRUE) {
timer_start(T_FFT);
}
fft(-1, u1, u2);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_FFT);
}
if (TIMERS_ENABLED == TRUE) {
timer_start(T_CHECKSUM);
}
checksum(iter, u2, dims[0]);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_CHECKSUM);
}
}
verify(NX, NY, NZ, niter, &verified, &class);
#pragma omp parallel
{
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */
timer_stop(T_TOTAL);
total_time = timer_read(T_TOTAL);
if( total_time != 0.0) {
mflops = 1.0e-6*(double)(NTOTAL) *
(14.8157+7.19641*log((double)(NTOTAL))
+ (5.23518+7.21113*log((double)(NTOTAL)))*niter)
/total_time;
} else {
mflops = 0.0;
}
c_print_results("FT", class, NX, NY, NZ, niter, nthreads,
total_time, mflops, " floating point", verified,
NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
if (TIMERS_ENABLED == TRUE) print_timers();
return 0;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX],
int t, int indexmap[NZ][NY][NX], int d[3]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c evolve u0 -> u1 (t time steps) in fourier space
c-------------------------------------------------------------------*/
int i, j, k;
#pragma omp parallel for default(shared) private(i,j,k)
for (k = 0; k < d[2]; k++) {
for (j = 0; j < d[1]; j++) {
for (i = 0; i < d[0]; i++) {
crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]);
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Fill in array u0 with initial conditions from
c random number generator
c-------------------------------------------------------------------*/
int k;
double x0, start, an, dummy;
static double tmp[NX*2*MAXDIM+1];
int i,j,t;
start = SEED;
/*--------------------------------------------------------------------
c Jump to the starting element for our first plane.
c-------------------------------------------------------------------*/
ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an);
dummy = randlc(&start, an);
ipow46(A, 2*NX*NY, &an);
/*--------------------------------------------------------------------
c Go through by z planes filling in one square at a time.
c-------------------------------------------------------------------*/
for (k = 0; k < dims[0][2]; k++) {
x0 = start;
vranlc(2*NX*dims[0][1], &x0, A, tmp);
t = 1;
for (j = 0; j < dims[0][1]; j++)
for (i = 0; i < NX; i++) {
u0[k][j][i].real = tmp[t++];
u0[k][j][i].imag = tmp[t++];
}
if (k != dims[0][2]) dummy = randlc(&start, an); /* always true here (k < dims[0][2]); retained from the reference code */
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void ipow46(double a, int exponent, double *result) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute a^exponent mod 2^46
c-------------------------------------------------------------------*/
double dummy, q, r;
int n, n2;
/*--------------------------------------------------------------------
c Use
c a^n = a^(n/2)*a^(n/2) if n even else
c a^n = a*a^(n-1) if n odd
c-------------------------------------------------------------------*/
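/*--------------------------------------------------------------------
c Worked example (each product is taken mod 2^46 via randlc):
c exponent = 5: n=5 odd  -> r = a,   n = 4
c               n=4 even -> q = a^2, n = 2
c               n=2 even -> q = a^4, n = 1
c final step:   r = r*q = a^5 mod 2^46
c-------------------------------------------------------------------*/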
*result = 1;
if (exponent == 0) return;
q = a;
r = 1;
n = exponent;
while (n > 1) {
n2 = n/2;
if (n2 * 2 == n) {
dummy = randlc(&q, q);
n = n2;
} else {
dummy = randlc(&r, q);
n = n-1;
}
}
dummy = randlc(&r, q);
*result = r;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void setup(void) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int ierr, i, j, fstatus;
printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
" - FT Benchmark\n\n");
niter = NITER_DEFAULT;
printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ);
printf(" Iterations : %7d\n", niter);
/* 1004 format(' Number of processes : ', i7)
1005 format(' Processor array : ', i3, 'x', i3)
1006 format(' WARNING: compiled for ', i5, ' processes. ',
> ' Will not verify. ')*/
for (i = 0;i < 3 ; i++) {
dims[i][0] = NX;
dims[i][1] = NY;
dims[i][2] = NZ;
}
for (i = 0; i < 3; i++) {
xstart[i] = 1;
xend[i] = NX;
ystart[i] = 1;
yend[i] = NY;
zstart[i] = 1;
zend[i] = NZ;
}
/*--------------------------------------------------------------------
c Set up info for blocking of ffts and transposes. This improves
c performance on cache-based systems. Blocking involves
c working on a chunk of the problem at a time, taking chunks
c along the first, second, or third dimension.
c
c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim)
c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims)
c Since 1st dim is always in processor, we'll assume it's long enough
c (default blocking factor is 16 so min size for 1st dim is 16)
c The only case we have to worry about is cffts1 in a 2d decomposition,
c so the blocking factor should not be larger than the 2nd dimension.
c-------------------------------------------------------------------*/
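/*--------------------------------------------------------------------
c Example: with the default fftblock = 16, cffts1 copies 16 adjacent
c pencils at a time into the contiguous scratch array y0, so the
c strided reads from x stay cache-resident during the 1-D FFTs.
c-------------------------------------------------------------------*/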
fftblock = FFTBLOCK_DEFAULT;
fftblockpad = FFTBLOCKPAD_DEFAULT;
if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock+3;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2
c for time evolution exponent.
c-------------------------------------------------------------------*/
int i, j, k, ii, ii2, jj, ij2, kk;
double ap;
/*--------------------------------------------------------------------
c basically we want to convert the fortran indices
c 1 2 3 4 5 6 7 8
c to
c 0 1 2 3 -4 -3 -2 -1
c The following magic formula does the trick:
c mod(i-1+n/2, n) - n/2
c-------------------------------------------------------------------*/
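/*--------------------------------------------------------------------
c In 0-based C indices this is ((i + n/2) % n) - n/2; e.g. for n = 8,
c indices 0..7 map to 0 1 2 3 -4 -3 -2 -1, the signed FFT frequencies.
c-------------------------------------------------------------------*/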
#pragma omp parallel for default(shared) private(i,j,k,ii,ii2,jj,ij2,kk)
for (i = 0; i < dims[2][0]; i++) {
ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;
ii2 = ii*ii;
for (j = 0; j < dims[2][1]; j++) {
jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;
ij2 = jj*jj+ii2;
for (k = 0; k < dims[2][2]; k++) {
kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;
indexmap[k][j][i] = kk*kk+ij2;
}
}
}
/*--------------------------------------------------------------------
c compute array of exponentials for time evolution.
c-------------------------------------------------------------------*/
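/*--------------------------------------------------------------------
c ex[i] = exp(ap*i) is built by the recurrence ex[i] = ex[i-1]*ex[1],
c avoiding one exp() call per element; evolve() then looks up
c ex[t*indexmap[k][j][i]] = exp(ap * t * (ii^2+jj^2+kk^2)).
c-------------------------------------------------------------------*/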
ap = - 4.0 * ALPHA * PI * PI;
ex[0] = 1.0;
ex[1] = exp(ap);
for (i = 2; i <= EXPMAX; i++) {
ex[i] = ex[i-1]*ex[1];
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void print_timers(void) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int i;
char *tstrings[] = { " total ",
" setup ",
" fft ",
" evolve ",
" checksum ",
" fftlow ",
" fftcopy " };
for (i = 0; i < T_MAX; i++) {
if (timer_read(i) != 0.0) {
printf("timer %2d(%16s( :%10.6f\n", i, tstrings[i], timer_read(i));
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
/*--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xin/xout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------*/
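/*--------------------------------------------------------------------
c forward (dir == 1): 1-D FFTs along x, then y, then z; the inverse
c reverses the order so the final pass can write the result into x2.
c-------------------------------------------------------------------*/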
if (dir == 1) {
cffts1(1, dims[0], x1, x1, y0, y1); /* x1 -> x1 */
cffts2(1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */
cffts3(1, dims[2], x1, x2, y0, y1); /* x1 -> x2 */
} else {
cffts3(-1, dims[2], x1, x1, y0, y1); /* x1 -> x1 */
cffts2(-1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */
cffts1(-1, dims[0], x1, x2, y0, y1); /* x1 -> x2 */
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int logd[3];
int i, j, k, jj;
for (i = 0; i < 3; i++) {
logd[i] = ilog2(d[i]);
}
#pragma omp parallel default(shared) private(i,j,k,jj) shared(is)
{
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
#pragma omp for
for (k = 0; k < d[2]; k++) {
for (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
for (j = 0; j < fftblock; j++) {
for (i = 0; i < d[0]; i++) {
y0[i][j].real = x[k][j+jj][i].real;
y0[i][j].imag = x[k][j+jj][i].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz (is, logd[0],
d[0], y0, y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
for (j = 0; j < fftblock; j++) {
for (i = 0; i < d[0]; i++) {
xout[k][j+jj][i].real = y0[i][j].real;
xout[k][j+jj][i].imag = y0[i][j].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int logd[3];
int i, j, k, ii;
for (i = 0; i < 3; i++) {
logd[i] = ilog2(d[i]);
}
#pragma omp parallel default(shared) private(i,j,k,ii) shared(is)
{
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
#pragma omp for
for (k = 0; k < d[2]; k++) {
for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
for (j = 0; j < d[1]; j++) {
for (i = 0; i < fftblock; i++) {
y0[j][i].real = x[k][j][i+ii].real;
y0[j][i].imag = x[k][j][i+ii].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz (is, logd[1],
d[1], y0, y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
for (j = 0; j < d[1]; j++) {
for (i = 0; i < fftblock; i++) {
xout[k][j][i+ii].real = y0[j][i].real;
xout[k][j][i+ii].imag = y0[j][i].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int logd[3];
int i, j, k, ii;
for (i = 0;i < 3; i++) {
logd[i] = ilog2(d[i]);
}
#pragma omp parallel default(shared) private(i,j,k,ii) shared(is)
{
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
#pragma omp for
for (j = 0; j < d[1]; j++) {
for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
for (k = 0; k < d[2]; k++) {
for (i = 0; i < fftblock; i++) {
y0[k][i].real = x[k][j][i+ii].real;
y0[k][i].imag = x[k][j][i+ii].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz (is, logd[2],
d[2], y0, y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
for (k = 0; k < d[2]; k++) {
for (i = 0; i < fftblock; i++) {
xout[k][j][i+ii].real = y0[k][i].real;
xout[k][j][i+ii].imag = y0[k][i].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fft_init (int n) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute the roots-of-unity array that will be used for subsequent FFTs.
c-------------------------------------------------------------------*/
int m,nu,ku,i,j,ln;
double t, ti;
/*--------------------------------------------------------------------
c Initialize the U array with sines and cosines in a manner that permits
c stride one access at each FFT iteration.
c-------------------------------------------------------------------*/
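/*--------------------------------------------------------------------
c Layout of u: u[0].real stores m = log2(n); level j (1 <= j <= m)
c occupies u[ku .. ku+ln-1] with ln = 2^(j-1), holding the twiddle
c factors cos(pi*k/ln) + i*sin(pi*k/ln) for k = 0 .. ln-1.
c-------------------------------------------------------------------*/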
nu = n;
m = ilog2(n);
u[0].real = (double)m;
u[0].imag = 0.0;
ku = 1;
ln = 1;
for (j = 1; j <= m; j++) {
t = PI / ln;
for (i = 0; i <= ln - 1; i++) {
ti = i * t;
u[i+ku].real = cos(ti);
u[i+ku].imag = sin(ti);
}
ku = ku + ln;
ln = 2 * ln;
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Computes NY N-point complex-to-complex FFTs of X using an algorithm due
c to Swarztrauber. X is both the input and the output array, while Y is a
c scratch array. It is assumed that N = 2^M. Before calling CFFTZ to
c perform FFTs, the array U must be initialized by calling CFFTZ with IS
c set to 0 and M set to MX, where MX is the maximum value of M for any
c subsequent call.
c-------------------------------------------------------------------*/
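/*--------------------------------------------------------------------
c Each fftz2 call below performs one butterfly stage, alternating
c x -> y and y -> x; after m stages the result sits in x when m is
c even and in y when m is odd, hence the copy-back at the end.
c-------------------------------------------------------------------*/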
int i,j,l,mx;
/*--------------------------------------------------------------------
c Check if input parameters are invalid.
c-------------------------------------------------------------------*/
mx = (int)(u[0].real);
if ((is != 1 && is != -1) || m < 1 || m > mx) {
printf("CFFTZ: Either U has not been initialized, or else\n"
"one of the input parameters is invalid%5d%5d%5d\n",
is, m, mx);
exit(1);
}
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= m; l+=2) {
fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y);
if (l == m) break;
fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x);
}
/*--------------------------------------------------------------------
c Copy Y to X.
c-------------------------------------------------------------------*/
if (m % 2 == 1) {
for (j = 0; j < n; j++) {
for (i = 0; i < fftblock; i++) {
x[j][i].real = y[j][i].real;
x[j][i].imag = y[j][i].imag;
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fftz2 (int is, int l, int m, int n, int ny, int ny1,
dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Performs the L-th iteration of the second variant of the Stockham FFT.
c-------------------------------------------------------------------*/
int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22;
dcomplex u1,x11,x21;
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
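/*--------------------------------------------------------------------
c lk = 2^(l-1) is the butterfly block length and li = 2^(m-l) is the
c number of blocks; since 2 << (p-1) equals 2^p only for p >= 1, the
c p == 0 cases take the explicit branches below.
c-------------------------------------------------------------------*/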
n1 = n / 2;
if (l-1 == 0) {
lk = 1;
} else {
lk = 2 << ((l - 1)-1);
}
if (m-l == 0) {
li = 1;
} else {
li = 2 << ((m - l)-1);
}
lj = 2 * lk;
ku = li;
for (i = 0; i < li; i++) {
i11 = i * lk;
i12 = i11 + n1;
i21 = i * lj;
i22 = i21 + lk;
if (is >= 1) {
u1.real = u[ku+i].real;
u1.imag = u[ku+i].imag;
} else {
u1.real = u[ku+i].real;
u1.imag = -u[ku+i].imag;
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k = 0; k < lk; k++) {
for (j = 0; j < ny; j++) {
double x11real, x11imag;
double x21real, x21imag;
x11real = x[i11+k][j].real;
x11imag = x[i11+k][j].imag;
x21real = x[i12+k][j].real;
x21imag = x[i12+k][j].imag;
y[i21+k][j].real = x11real + x21real;
y[i21+k][j].imag = x11imag + x21imag;
y[i22+k][j].real = u1.real * (x11real - x21real)
- u1.imag * (x11imag - x21imag);
y[i22+k][j].imag = u1.real * (x11imag - x21imag)
+ u1.imag * (x11real - x21real);
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static int ilog2(int n) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int nn, lg;
if (n == 1) {
return 0;
}
lg = 1;
nn = 2;
while (nn < n) {
nn = nn << 1;
lg++;
}
return lg;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]) {
#pragma omp parallel default(shared)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int j, q,r,s, ierr;
dcomplex chk,allchk;
chk.real = 0.0;
chk.imag = 0.0;
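/*--------------------------------------------------------------------
c Sample the field at 1024 pseudo-random lattice points
c (j mod NX, 3j mod NY, 5j mod NZ) for j = 1..1024 and sum them; the
c owner tests against {x,y,z}start/end are a remnant of the MPI
c decomposition and always succeed in this shared-memory version.
c-------------------------------------------------------------------*/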
#pragma omp for nowait
for (j = 1; j <= 1024; j++) {
q = j%NX+1;
if (q >= xstart[0] && q <= xend[0]) {
r = (3*j)%NY+1;
if (r >= ystart[0] && r <= yend[0]) {
s = (5*j)%NZ+1;
if (s >= zstart[0] && s <= zend[0]) {
cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]);
}
}
}
}
#pragma omp critical
{
sums[i].real += chk.real;
sums[i].imag += chk.imag;
}
#pragma omp barrier
#pragma omp single
{
/* complex / real */
sums[i].real = sums[i].real/(double)(NTOTAL);
sums[i].imag = sums[i].imag/(double)(NTOTAL);
printf("T = %5d Checksum = %22.12e %22.12e\n",
i, sums[i].real, sums[i].imag);
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void verify (int d1, int d2, int d3, int nt,
boolean *verified, char *class) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int ierr, size, i;
double err, epsilon;
/*--------------------------------------------------------------------
c Sample size reference checksums
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Class S size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_s[6+1] = { 0.0,
5.546087004964e+02,
5.546385409189e+02,
5.546148406171e+02,
5.545423607415e+02,
5.544255039624e+02,
5.542683411902e+02 };
double vdata_imag_s[6+1] = { 0.0,
4.845363331978e+02,
4.865304269511e+02,
4.883910722336e+02,
4.901273169046e+02,
4.917475857993e+02,
4.932597244941e+02 };
/*--------------------------------------------------------------------
c Class W size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_w[6+1] = { 0.0,
5.673612178944e+02,
5.631436885271e+02,
5.594024089970e+02,
5.560698047020e+02,
5.530898991250e+02,
5.504159734538e+02 };
double vdata_imag_w[6+1] = { 0.0,
5.293246849175e+02,
5.282149986629e+02,
5.270996558037e+02,
5.260027904925e+02,
5.249400845633e+02,
5.239212247086e+02 };
/*--------------------------------------------------------------------
c Class A size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_a[6+1] = { 0.0,
5.046735008193e+02,
5.059412319734e+02,
5.069376896287e+02,
5.077892868474e+02,
5.085233095391e+02,
5.091487099959e+02 };
double vdata_imag_a[6+1] = { 0.0,
5.114047905510e+02,
5.098809666433e+02,
5.098144042213e+02,
5.101336130759e+02,
5.104914655194e+02,
5.107917842803e+02 };
/*--------------------------------------------------------------------
c Class B size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_b[20+1] = { 0.0,
5.177643571579e+02,
5.154521291263e+02,
5.146409228649e+02,
5.142378756213e+02,
5.139626667737e+02,
5.137423460082e+02,
5.135547056878e+02,
5.133910925466e+02,
5.132470705390e+02,
5.131197729984e+02,
5.130070319283e+02,
5.129070537032e+02,
5.128182883502e+02,
5.127393733383e+02,
5.126691062020e+02,
5.126064276004e+02,
5.125504076570e+02,
5.125002331720e+02,
5.124551951846e+02,
5.124146770029e+02 };
double vdata_imag_b[20+1] = { 0.0,
5.077803458597e+02,
5.088249431599e+02,
5.096208912659e+02,
5.101023387619e+02,
5.103976610617e+02,
5.105948019802e+02,
5.107404165783e+02,
5.108576573661e+02,
5.109577278523e+02,
5.110460304483e+02,
5.111252433800e+02,
5.111968077718e+02,
5.112616233064e+02,
5.113203605551e+02,
5.113735928093e+02,
5.114218460548e+02,
5.114656139760e+02,
5.115053595966e+02,
5.115415130407e+02,
5.115744692211e+02 };
/*--------------------------------------------------------------------
c Class C size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_c[20+1] = { 0.0,
5.195078707457e+02,
5.155422171134e+02,
5.144678022222e+02,
5.140150594328e+02,
5.137550426810e+02,
5.135811056728e+02,
5.134569343165e+02,
5.133651975661e+02,
5.132955192805e+02,
5.132410471738e+02,
5.131971141679e+02,
5.131605205716e+02,
5.131290734194e+02,
5.131012720314e+02,
5.130760908195e+02,
5.130528295923e+02,
5.130310107773e+02,
5.130103090133e+02,
5.129905029333e+02,
5.129714421109e+02 };
double vdata_imag_c[20+1] = { 0.0,
5.149019699238e+02,
5.127578201997e+02,
5.122251847514e+02,
5.121090289018e+02,
5.121143685824e+02,
5.121496764568e+02,
5.121870921893e+02,
5.122193250322e+02,
5.122454735794e+02,
5.122663649603e+02,
5.122830879827e+02,
5.122965869718e+02,
5.123075927445e+02,
5.123166486553e+02,
5.123241541685e+02,
5.123304037599e+02,
5.123356167976e+02,
5.123399592211e+02,
5.123435588985e+02,
5.123465164008e+02 };
epsilon = 1.0e-12;
*verified = TRUE;
*class = 'U';
if (d1 == 64 &&
d2 == 64 &&
d3 == 64 &&
nt == 6) {
*class = 'S';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_s[i]) / vdata_real_s[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_s[i]) / vdata_imag_s[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 128 &&
d2 == 128 &&
d3 == 32 &&
nt == 6) {
*class = 'W';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_w[i]) / vdata_real_w[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_w[i]) / vdata_imag_w[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 256 &&
d2 == 256 &&
d3 == 128 &&
nt == 6) {
*class = 'A';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_a[i]) / vdata_real_a[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_a[i]) / vdata_imag_a[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 512 &&
d2 == 256 &&
d3 == 256 &&
nt == 20) {
*class = 'B';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_b[i]) / vdata_real_b[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_b[i]) / vdata_imag_b[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 512 &&
d2 == 512 &&
d3 == 512 &&
nt == 20) {
*class = 'C';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_c[i]) / vdata_real_c[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_c[i]) / vdata_imag_c[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
}
if (*class != 'U') {
printf("Result verification successful\n");
} else {
printf("Result verification failed\n");
}
printf("class = %1c\n", *class);
}
|
matmul.c | #include <stdlib.h>
#include <sys/time.h>
#include <stdio.h>
#if OMP == 1
#include <omp.h>
#endif
#ifndef _N_
#define _N_ 512
#endif
#ifndef HOST_MEM_ALIGNMENT
#define HOST_MEM_ALIGNMENT 1
#endif
#if HOST_MEM_ALIGNMENT == 1
#define AOCL_ALIGNMENT 64
#endif
int N = _N_;
int M = _N_;
int P = _N_;
double my_timer ()
{
struct timeval time;
gettimeofday (&time, 0);
return time.tv_sec + time.tv_usec / 1000000.0;
}
void
MatrixMultiplication_openacc(float * a, float * b, float * c)
{
int i, j, k ;
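/* pcopyout (present_or_copyout) allocates a[] on the device and copies it
   back to the host at region exit; b[] and c[] are copied in only. gang
   and worker distribute the i and j loops across the accelerator, while
   the k loop stays sequential because it carries the sum reduction. */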
#pragma acc data pcopyout(a[0:(M*N)]), copyin(b[0:(M*P)],c[0:(P*N)])
{
#pragma acc kernels loop independent gang
for (i=0; i<M; i++){
#pragma acc loop worker
for (j=0; j<N; j++)
{
float sum = 0.0 ;
#pragma acc loop seq
for (k=0; k<P; k++) {
sum += b[i*P+k]*c[k*N+j] ;
}
a[i*N+j] = sum ;
}
}
}
}
void
MatrixMultiplication_openmp(float * a,float * b, float * c)
{
int i, j, k ;
int chunk = N/4;
#pragma omp parallel shared(a,b,c,chunk) private(i,j,k)
{
#ifdef _OPENMP
if(omp_get_thread_num() == 0) {
printf("Number of OpenMP threads %d\n", omp_get_num_threads());
}
#endif
#pragma omp for
for (i=0; i<M; i++){
for (j=0; j<N; j++)
{
float sum = 0.0 ;
for (k=0; k<P; k++)
sum += b[i*P+k]*c[k*N+j] ;
a[i*N+j] = sum ;
}
}
}
}
int main()
{
float *a, *b, *c;
#if HOST_MEM_ALIGNMENT == 1
void *p;
#endif
int i;
double elapsed_time;
#if HOST_MEM_ALIGNMENT == 1
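/* NOTE: the posix_memalign return values are unchecked; tolerable in a
   benchmark, but production code should verify each allocation. */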
posix_memalign(&p, AOCL_ALIGNMENT, N*N*sizeof(float));
a = (float *)p;
posix_memalign(&p, AOCL_ALIGNMENT, N*N*sizeof(float));
b = (float *)p;
posix_memalign(&p, AOCL_ALIGNMENT, N*N*sizeof(float));
c = (float *)p;
#else
a = (float *) malloc(M*N*sizeof(float));
b = (float *) malloc(M*P*sizeof(float));
c = (float *) malloc(P*N*sizeof(float));
#endif
for (i = 0; i < M*N; i++) {
a[i] = (float) 0.0;
}
for (i = 0; i < M*P; i++) {
b[i] = (float) i;
}
for (i = 0; i < P*N; i++) {
c[i] = (float) 1.0;
}
elapsed_time = my_timer();
MatrixMultiplication_openmp(a,b,c);
elapsed_time = my_timer() - elapsed_time;
printf("CPU Elapsed time = %lf sec\n", elapsed_time);
elapsed_time = my_timer();
MatrixMultiplication_openacc(a,b,c);
elapsed_time = my_timer() - elapsed_time;
printf("Accelerator Elapsed time = %lf sec\n", elapsed_time);
free(a);
free(b);
free(c);
return 0;
}
|
GB_AxB_dot2.c | //------------------------------------------------------------------------------
// GB_AxB_dot2: compute C=A'*B or C<!M>=A'*B in parallel, in-place
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// This method always constructs C as bitmap; it then converts C to sparse or
// hyper if A or B are hypersparse. The C<M>=A'*B dot product when C is sparse
// is computed by GB_AxB_dot3. This method handles the case when C is bitmap.
// TODO: this is slower than it could be if A and B are both bitmap, when
// A->vlen is large, and likely if A and B are both either bitmap or full.
// This is because the inner loop is a simple full/bitmap dot product, across
// the entire input vectors. No tiling is used, so cache performance is not
// as good as it could be. For large problems, C=(A')*B is faster with
// the saxpy3 method, as compared to this method with C=A'*B.
#include "GB_mxm.h"
#include "GB_subref.h"
#include "GB_ek_slice.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_AxB__include1.h"
#ifndef GBCOMPACT
#include "GB_AxB__include2.h"
#endif
#define GB_FREE_ALL \
{ \
GB_phbix_free (M2) ; \
GB_WERK_POP (M_ek_slicing, int64_t) ; \
GB_WERK_POP (B_slice, int64_t) ; \
GB_WERK_POP (A_slice, int64_t) ; \
}
GB_PUBLIC
GrB_Info GB_AxB_dot2 // C=A'*B or C<!M>=A'*B, dot product method
(
GrB_Matrix C, // output matrix, static header
const bool C_iso, // true if C is iso
const GB_void *cscalar, // iso value of C
const GrB_Matrix M_in, // mask matrix for C<!M>=A'*B, may be NULL
const bool Mask_comp, // if true, use !M
const bool Mask_struct, // if true, use only the structure of M
const GrB_Matrix A_in, // input matrix
const GrB_Matrix B_in, // input matrix
const GrB_Semiring semiring, // semiring that defines C=A*B
const bool flipxy, // if true, do z=fmult(b,a) vs fmult(a,b)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (C != NULL && C->static_header) ;
ASSERT_MATRIX_OK_OR_NULL (M_in, "M for dot A'*B", GB0) ;
ASSERT_MATRIX_OK (A_in, "A for dot A'*B", GB0) ;
ASSERT_MATRIX_OK (B_in, "B for dot A'*B", GB0) ;
ASSERT (!GB_ZOMBIES (M_in)) ;
ASSERT (GB_JUMBLED_OK (M_in)) ;
ASSERT (!GB_PENDING (M_in)) ;
ASSERT (!GB_ZOMBIES (A_in)) ;
ASSERT (!GB_JUMBLED (A_in)) ;
ASSERT (!GB_PENDING (A_in)) ;
ASSERT (!GB_ZOMBIES (B_in)) ;
ASSERT (!GB_JUMBLED (B_in)) ;
ASSERT (!GB_PENDING (B_in)) ;
ASSERT_SEMIRING_OK (semiring, "semiring for numeric A'*B", GB0) ;
GrB_Matrix M = NULL ;
struct GB_Matrix_opaque M2_header ;
GrB_Matrix M2 = NULL ;
GB_WERK_DECLARE (A_slice, int64_t) ;
GB_WERK_DECLARE (B_slice, int64_t) ;
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
ASSERT (A_in->vlen == B_in->vlen) ;
ASSERT (A_in->vlen > 0) ;
if (M_in == NULL)
{
GBURBLE ("(%s=%s'*%s) ",
GB_sparsity_char (GxB_BITMAP),
GB_sparsity_char_matrix (A_in),
GB_sparsity_char_matrix (B_in)) ;
}
else
{
GBURBLE ("(%s%s%s%s%s=%s'*%s) ",
GB_sparsity_char (GxB_BITMAP),
Mask_struct ? "{" : "<",
Mask_comp ? "!" : "",
GB_sparsity_char_matrix (M_in),
Mask_struct ? "}" : ">",
GB_sparsity_char_matrix (A_in),
GB_sparsity_char_matrix (B_in)) ;
}
//--------------------------------------------------------------------------
// construct shallow copies of A and B, if hypersparse
//--------------------------------------------------------------------------
// If A_in is hypersparse, a new sparse matrix A is constructed with
// A->vdim = A_in->nvec and the same vlen as A_in, and then the
// hyper_shallow C->vlen will equal A->vdim < cvlen_final.
// If B_in is hypersparse, a new sparse matrix B is constructed with
// B->vdim = B_in->nvec and the same vlen as B_in, and then the
// hyper_shallow C->vdim will equal B->vdim < cvdim_final.
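// Example: if A_in is hypersparse with vdim = 2^40 but only nvec = 5
// non-empty vectors, the shallow A is treated as vdim = 5, so the bitmap
// C starts as a compact 5-by-(B->nvec) matrix and is expanded back to
// the final cvlen_final-by-cvdim_final sparse/hyper form at the end.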
int64_t cvlen_final = A_in->vdim ;
int64_t cvdim_final = B_in->vdim ;
bool A_is_hyper = GB_IS_HYPERSPARSE (A_in) ;
bool B_is_hyper = GB_IS_HYPERSPARSE (B_in) ;
bool A_or_B_hyper = A_is_hyper || B_is_hyper ;
GrB_Index *restrict Ah = (GrB_Index *) A_in->h ;
GrB_Index *restrict Bh = (GrB_Index *) B_in->h ;
struct GB_Matrix_opaque A_header, B_header ;
GrB_Matrix A = (A_is_hyper) ? GB_hyper_shallow (&A_header, A_in) : A_in ;
GrB_Matrix B = (B_is_hyper) ? GB_hyper_shallow (&B_header, B_in) : B_in ;
ASSERT (!GB_IS_HYPERSPARSE (A)) ;
ASSERT (!GB_IS_HYPERSPARSE (B)) ;
//--------------------------------------------------------------------------
// determine the size of C
//--------------------------------------------------------------------------
int64_t cnvec = B->nvec ;
int64_t cvlen = A->vdim ;
int64_t cvdim = B->vdim ;
int64_t cnz ;
bool ok = GB_Index_multiply ((GrB_Index *) (&cnz), cvlen, cvdim) ;
//--------------------------------------------------------------------------
// extract the submask if A or B are hypersparse
//--------------------------------------------------------------------------
if (A_or_B_hyper && M_in != NULL)
{
// M2 = M_in (Ah, Bh), where M2 has a static header
// if Mask_struct then M2 is extracted as iso
M2 = GB_clear_static_header (&M2_header) ;
GB_OK (GB_subref (M2, Mask_struct, M_in->is_csc, M_in,
(A_is_hyper) ? Ah : GrB_ALL, cvlen,
(B_is_hyper) ? Bh : GrB_ALL, cvdim, false, Context)) ;
M = M2 ;
ASSERT_MATRIX_OK_OR_NULL (M, "M submask dot A'*B", GB0) ;
}
else
{
// use the mask as-is
M = M_in ;
}
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
int64_t naslice = 0 ;
int64_t nbslice = 0 ;
int64_t anvec = A->nvec ;
double anz = (double) GB_nnz_held (A) ;
int64_t bnvec = B->nvec ;
double bnz = (double) GB_nnz_held (B) ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + bnz, chunk, nthreads_max) ;
#define GB_NTASKS_PER_THREAD 32
if (nthreads == 1)
{
// do the entire computation with a single thread
naslice = 1 ;
nbslice = 1 ;
}
else
{
// determine number of slices for A' and B
if (bnvec == 1)
{
// C and B are single vectors
naslice = GB_NTASKS_PER_THREAD * nthreads ;
nbslice = 1 ;
}
else if (anvec == 1 || bnvec == 0
|| bnvec > GB_NTASKS_PER_THREAD * nthreads)
{
// A is a single vector, or B is empty, or B is large: just slice B
naslice = 1 ;
nbslice = GB_NTASKS_PER_THREAD * nthreads ;
}
else
{
// slice B into individual vectors
nbslice = bnvec ;
// slice A' to get a total of about 16*nthreads tasks
naslice = (GB_NTASKS_PER_THREAD * nthreads) / nbslice ;
// but do not slice A too finely
naslice = GB_IMIN (naslice, anvec/4) ;
naslice = GB_IMAX (naslice, nthreads) ;
}
}
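// Example: with nthreads = 4, GB_NTASKS_PER_THREAD*nthreads = 128; if
// bnvec = 8 and anvec >= 64, this yields nbslice = 8 and naslice =
// 128/8 = 16, for about 128 fine-grained dot-product tasks in total.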
//--------------------------------------------------------------------------
// get the semiring operators
//--------------------------------------------------------------------------
GrB_BinaryOp mult = semiring->multiply ;
GrB_Monoid add = semiring->add ;
ASSERT (mult->ztype == add->op->ztype) ;
bool A_is_pattern, B_is_pattern ;
GB_binop_pattern (&A_is_pattern, &B_is_pattern, flipxy, mult->opcode) ;
//--------------------------------------------------------------------------
// allocate workspace and slice A and B
//--------------------------------------------------------------------------
// A and B can have any sparsity: full, bitmap, sparse, or hypersparse.
// C is always created as bitmap
GB_WERK_PUSH (A_slice, naslice + 1, int64_t) ;
GB_WERK_PUSH (B_slice, nbslice + 1, int64_t) ;
if (A_slice == NULL || B_slice == NULL || !ok)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_pslice (A_slice, A->p, A->nvec, naslice, false) ;
GB_pslice (B_slice, B->p, B->nvec, nbslice, false) ;
//--------------------------------------------------------------------------
// allocate C
//--------------------------------------------------------------------------
// if M is sparse/hyper, then calloc C->b; otherwise use malloc
bool M_is_sparse_or_hyper = (M != NULL) &&
(GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
GrB_Type ctype = add->op->ztype ;
// set C->iso = C_iso OK
GB_OK (GB_new_bix (&C, true, // bitmap, static header
ctype, cvlen, cvdim, GB_Ap_malloc, true, GxB_BITMAP,
M_is_sparse_or_hyper, B->hyper_switch, cnvec, cnz, true, C_iso,
Context)) ;
//--------------------------------------------------------------------------
// if M is sparse/hyper, scatter it into the C bitmap
//--------------------------------------------------------------------------
if (M_is_sparse_or_hyper)
{
// FUTURE:: could just set Cb [pC] = 2 since Cb has just been calloc'd.
// However, in the future, this method might be able to modify C on
// input, in which case C->b will not be all zero.
int M_ntasks, M_nthreads ;
GB_SLICE_MATRIX (M, 8, chunk) ;
// Cb [pC] += 2 for each entry M(i,j) in the mask
GB_bitmap_M_scatter (C,
NULL, 0, GB_ALL, NULL, NULL, 0, GB_ALL, NULL,
M, Mask_struct, GB_ASSIGN, GB_BITMAP_M_SCATTER_PLUS_2,
M_ek_slicing, M_ntasks, M_nthreads, Context) ;
// the bitmap of C now contains:
// Cb (i,j) = 0: cij not present, mij zero
// Cb (i,j) = 1: cij present, mij zero (not used yet)
// Cb (i,j) = 2: cij not present, mij 1
// Cb (i,j) = 3: cij present, mij 1 (not used yet)
GB_WERK_POP (M_ek_slicing, int64_t) ;
}
//--------------------------------------------------------------------------
// C<#>=A'*B, computing each entry with a dot product, via builtin semiring
//--------------------------------------------------------------------------
if (C_iso)
{
//----------------------------------------------------------------------
// C is iso; compute the pattern of C<#>=A'*B with the any_pair semiring
//----------------------------------------------------------------------
memcpy (C->x, cscalar, ctype->size) ;
info = GB (_Adot2B__any_pair_iso) (C, M, Mask_comp, Mask_struct, A,
true, A_slice, B, true, B_slice, nthreads, naslice, nbslice) ;
ASSERT (info != GrB_NO_VALUE) ;
}
else
{
//----------------------------------------------------------------------
// C is non-iso
//----------------------------------------------------------------------
bool done = false ;
#ifndef GBCOMPACT
//------------------------------------------------------------------
// define the worker for the switch factory
//------------------------------------------------------------------
#define GB_Adot2B(add,mult,xname) \
GB (_Adot2B_ ## add ## mult ## xname)
#define GB_AxB_WORKER(add,mult,xname) \
{ \
info = GB_Adot2B (add,mult,xname) (C, M, Mask_comp, \
Mask_struct, A, A_is_pattern, A_slice, B, B_is_pattern, \
B_slice, nthreads, naslice, nbslice) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
GB_Opcode mult_opcode, add_opcode ;
GB_Type_code xcode, ycode, zcode ;
if (GB_AxB_semiring_builtin (A, A_is_pattern, B, B_is_pattern,
semiring, flipxy, &mult_opcode, &add_opcode, &xcode, &ycode,
&zcode))
{
#include "GB_AxB_factory.c"
}
ASSERT (info == GrB_SUCCESS || info == GrB_NO_VALUE) ;
#endif
//----------------------------------------------------------------------
// C = A'*B, computing each entry with a dot product, with typecasting
//----------------------------------------------------------------------
if (!done)
{
#define GB_DOT2_GENERIC
GB_BURBLE_MATRIX (C, "(generic C%s=A'*B) ", (M == NULL) ? "" :
(Mask_comp ? "<!M>" : "<M>")) ;
#include "GB_AxB_dot_generic.c"
}
}
//--------------------------------------------------------------------------
// free workspace
//--------------------------------------------------------------------------
GB_FREE_ALL ;
C->magic = GB_MAGIC ;
ASSERT_MATRIX_OK (C, "dot2: C = A'*B output", GB0) ;
ASSERT (!GB_ZOMBIES (C)) ;
//--------------------------------------------------------------------------
// convert C to sparse/hyper if A or B are hypersparse on input
//--------------------------------------------------------------------------
if (A_or_B_hyper)
{
//----------------------------------------------------------------------
// convert C from bitmap to sparse/hyper
//----------------------------------------------------------------------
// C is currently A_in->nvec by B_in->nvec, in bitmap form. It must be
// converted back into sparse/hypersparse form, with zombies.
//----------------------------------------------------------------------
// allocate the sparse/hypersparse structure of the final C
//----------------------------------------------------------------------
int64_t *restrict Cp = NULL ; size_t Cp_size = 0 ;
int64_t *restrict Ch = NULL ; size_t Ch_size = 0 ;
int64_t *restrict Ci = NULL ; size_t Ci_size = 0 ;
Cp = GB_MALLOC (cvdim+1, int64_t, &Cp_size) ;
Ch = NULL ;
if (B_is_hyper)
{
Ch = GB_MALLOC (cvdim, int64_t, &Ch_size) ;
}
Ci = GB_MALLOC (cnz, int64_t, &Ci_size) ;
if (Cp == NULL || (B_is_hyper && Ch == NULL) || Ci == NULL)
{
// out of memory
GB_phbix_free (C) ;
GB_FREE (&Cp, Cp_size) ;
GB_FREE (&Ch, Ch_size) ;
GB_FREE (&Ci, Ci_size) ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// construct the hyperlist of C, if B is hypersparse
//----------------------------------------------------------------------
nthreads = GB_nthreads (cvdim, chunk, nthreads_max) ;
if (B_is_hyper)
{
// C becomes hypersparse
ASSERT (cvdim == B_in->nvec) ;
GB_memcpy (Ch, B_in->h, cvdim * sizeof (int64_t), nthreads) ;
}
//----------------------------------------------------------------------
// construct the vector pointers of C
//----------------------------------------------------------------------
int64_t pC ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cvdim+1 ; pC++)
{
Cp [pC] = pC * cvlen ;
}
//----------------------------------------------------------------------
// construct the pattern of C from its bitmap
//----------------------------------------------------------------------
// C(i,j) becomes a zombie if not present in the bitmap
nthreads = GB_nthreads (cnz, chunk, nthreads_max) ;
int8_t *restrict Cb = C->b ;
if (A_is_hyper)
{
ASSERT (cvlen == A_in->nvec) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
int64_t i = Ah [pC % cvlen] ;
Ci [pC] = (Cb [pC]) ? i : GB_FLIP (i) ;
}
}
else
{
ASSERT (cvlen == cvlen_final && cvlen == A->vdim) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
int64_t i = pC % cvlen ;
Ci [pC] = (Cb [pC]) ? i : GB_FLIP (i) ;
}
}
//----------------------------------------------------------------------
// transplant the new content and finalize C
//----------------------------------------------------------------------
C->p = Cp ; Cp = NULL ; C->p_size = Cp_size ;
C->h = Ch ; Ch = NULL ; C->h_size = Ch_size ;
C->i = Ci ; Ci = NULL ; C->i_size = Ci_size ;
C->nzombies = cnz - C->nvals ;
C->vdim = cvdim_final ;
C->vlen = cvlen_final ;
C->nvals = -1 ;
C->nvec = cvdim ;
C->plen = cvdim ;
C->nvec_nonempty = (cvlen == 0) ? 0 : cvdim ;
// free the bitmap
GB_FREE ((&C->b), C->b_size) ;
// C is now sparse or hypersparse
ASSERT_MATRIX_OK (C, "dot2: converted back from bitmap C", GB0) ;
ASSERT (GB_ZOMBIES_OK (C)) ;
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
ASSERT (GB_ZOMBIES_OK (C)) ;
ASSERT (!GB_JUMBLED (C)) ;
ASSERT (!GB_PENDING (C)) ;
return (GrB_SUCCESS) ;
}
|
prop3DAcoIsoDenQ_DEO2_FDTD.h | #ifndef PROP3DACOISODENQ_DEO2_FDTD_H
#define PROP3DACOISODENQ_DEO2_FDTD_H
#include <omp.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <fftw3.h>
#include <complex>
#include "propagatorStaticFunctions.h"
#define MIN(x,y) ((x)<(y)?(x):(y))
class Prop3DAcoIsoDenQ_DEO2_FDTD {
public:
const bool _freeSurface;
const long _nbx, _nby, _nbz, _nthread, _nx, _ny, _nz, _nsponge;
const float _dx, _dy, _dz, _dt;
const float _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz;
float * __restrict__ _v = NULL;
float * __restrict__ _b = NULL;
float * __restrict__ _dtOmegaInvQ = NULL;
float * __restrict__ _pSpace = NULL;
float * __restrict__ _tmpPx1 = NULL;
float * __restrict__ _tmpPy1 = NULL;
float * __restrict__ _tmpPz1 = NULL;
float * __restrict__ _tmpPx2 = NULL;
float * __restrict__ _tmpPy2 = NULL;
float * __restrict__ _tmpPz2 = NULL;
float * _pOld = NULL;
float * _pCur = NULL;
Prop3DAcoIsoDenQ_DEO2_FDTD(
bool freeSurface,
long nthread,
long nx,
long ny,
long nz,
long nsponge,
float dx,
float dy,
float dz,
float dt,
const long nbx,
const long nby,
const long nbz) :
_freeSurface(freeSurface),
_nthread(nthread),
_nx(nx),
_ny(ny),
_nz(nz),
_nsponge(nsponge),
_nbx(nbx),
_nby(nby),
_nbz(nbz),
_dx(dx),
_dy(dy),
_dz(dz),
_dt(dt),
_c8_1(+1225.0 / 1024.0),
_c8_2(-245.0 / 3072.0),
_c8_3(+49.0 / 5120.0),
_c8_4(-5.0 / 7168.0),
_invDx(1.0 / _dx),
_invDy(1.0 / _dy),
_invDz(1.0 / _dz) {
// Allocate arrays
_v = new float[_nx * _ny * _nz];
_b = new float[_nx * _ny * _nz];
_dtOmegaInvQ = new float[_nx * _ny * _nz];
_pSpace = new float[_nx * _ny * _nz];
_tmpPx1 = new float[_nx * _ny * _nz];
_tmpPy1 = new float[_nx * _ny * _nz];
_tmpPz1 = new float[_nx * _ny * _nz];
_tmpPx2 = new float[_nx * _ny * _nz];
_tmpPy2 = new float[_nx * _ny * _nz];
_tmpPz2 = new float[_nx * _ny * _nz];
_pOld = new float[_nx * _ny * _nz];
_pCur = new float[_nx * _ny * _nz];
numaFirstTouch(_nx, _ny, _nz, _nthread, _v, _b,
_dtOmegaInvQ, _pSpace, _tmpPx1, _tmpPy1, _tmpPz1, _tmpPx2, _tmpPy2, _tmpPz2,
_pOld, _pCur, _nbx, _nby, _nbz);
}
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void numaFirstTouch(
const long nx,
const long ny,
const long nz,
const long nthread,
float * __restrict__ v,
float * __restrict__ b,
float * __restrict__ dtOmegaInvQ,
float * __restrict__ pSpace,
float * __restrict__ tmpPx1,
float * __restrict__ tmpPy1,
float * __restrict__ tmpPz1,
float * __restrict__ tmpPx2,
float * __restrict__ tmpPy2,
float * __restrict__ tmpPz2,
float * __restrict__ pOld,
float * __restrict__ pCur,
const long BX_3D,
const long BY_3D,
const long BZ_3D) {
const long nx4 = nx - 4;
const long ny4 = ny - 4;
const long nz4 = nz - 4;
#pragma omp parallel for collapse(3) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_3D) {
for (long by = 4; by < ny4; by += BY_3D) {
for (long bz = 4; bz < nz4; bz += BZ_3D) {
const long kxmax = MIN(bx + BX_3D, nx4);
const long kymax = MIN(by + BY_3D, ny4);
const long kzmax = MIN(bz + BZ_3D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
v[k] = 0;
b[k] = 0;
dtOmegaInvQ[k] = 0;
pSpace[k] = 0;
tmpPx1[k] = 0;
tmpPy1[k] = 0;
tmpPz1[k] = 0;
tmpPx2[k] = 0;
tmpPy2[k] = 0;
tmpPz2[k] = 0;
pOld[k] = 0;
pCur[k] = 0;
}
}
}
}
}
}
// annulus: zero the 4-cell-deep boundary shells in z, y, and x that the
// blocked interior loop above deliberately skips
for (long k = 0; k < 4; k++) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long ky = 0; ky < ny; ky++) {
const long kindex1 = kx * ny * nz + ky * nz + k;
const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k);
v[kindex1] = b[kindex1] = dtOmegaInvQ[kindex1] = pSpace[kindex1] =
tmpPx1[kindex1] = tmpPy1[kindex1] = tmpPz1[kindex1] =
tmpPx2[kindex1] = tmpPy2[kindex1] = tmpPz2[kindex1] =
pOld[kindex1] = pCur[kindex1] = 0;
v[kindex2] = b[kindex2] = dtOmegaInvQ[kindex2] = pSpace[kindex2] =
tmpPx1[kindex2] = tmpPy1[kindex2] = tmpPz1[kindex2] =
tmpPx2[kindex2] = tmpPy2[kindex2] = tmpPz2[kindex2] =
pOld[kindex2] = pCur[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = kx * ny * nz + k * nz + kz;
const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz;
v[kindex1] = b[kindex1] = dtOmegaInvQ[kindex1] = pSpace[kindex1] =
tmpPx1[kindex1] = tmpPy1[kindex1] = tmpPz1[kindex1] =
tmpPx2[kindex1] = tmpPy2[kindex1] = tmpPz2[kindex1] =
pOld[kindex1] = pCur[kindex1] = 0;
v[kindex2] = b[kindex2] = dtOmegaInvQ[kindex2] = pSpace[kindex2] =
tmpPx1[kindex2] = tmpPy1[kindex2] = tmpPz1[kindex2] =
tmpPx2[kindex2] = tmpPy2[kindex2] = tmpPz2[kindex2] =
pOld[kindex2] = pCur[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long ky = 0; ky < ny; ky++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = k * ny * nz + ky * nz + kz;
const long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz;
v[kindex1] = b[kindex1] = dtOmegaInvQ[kindex1] = pSpace[kindex1] =
tmpPx1[kindex1] = tmpPy1[kindex1] = tmpPz1[kindex1] =
tmpPx2[kindex1] = tmpPy2[kindex1] = tmpPz2[kindex1] =
pOld[kindex1] = pCur[kindex1] = 0;
v[kindex2] = b[kindex2] = dtOmegaInvQ[kindex2] = pSpace[kindex2] =
tmpPx1[kindex2] = tmpPy1[kindex2] = tmpPz1[kindex2] =
tmpPx2[kindex2] = tmpPy2[kindex2] = tmpPz2[kindex2] =
pOld[kindex2] = pCur[kindex2] = 0;
}
}
}
}
~Prop3DAcoIsoDenQ_DEO2_FDTD() {
if (_v != NULL) delete [] _v;
if (_b != NULL) delete [] _b;
if (_dtOmegaInvQ != NULL) delete [] _dtOmegaInvQ;
if (_pSpace != NULL) delete [] _pSpace;
if (_tmpPx1 != NULL) delete [] _tmpPx1;
if (_tmpPy1 != NULL) delete [] _tmpPy1;
if (_tmpPz1 != NULL) delete [] _tmpPz1;
if (_tmpPx2 != NULL) delete [] _tmpPx2;
if (_tmpPy2 != NULL) delete [] _tmpPy2;
if (_tmpPz2 != NULL) delete [] _tmpPz2;
if (_pOld != NULL) delete [] _pOld;
if (_pCur != NULL) delete [] _pCur;
}
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
void info() {
printf("\n");
printf("Prop3DAcoIsoDenQ_DEO2_FDTD\n");
printf(" nx,ny,nz; %5ld %5ld %5ld\n", _nx, _ny, _nz);
printf(" nthread,nsponge,fs; %5ld %5ld %5d\n", _nthread, _nsponge, _freeSurface);
printf(" X min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dx * (_nx - 1), _dx);
printf(" Y min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dy * (_ny - 1), _dy);
printf(" Z min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dz * (_nz - 1), _dz);
}
/**
 * Notes
 * - User must have called setupDtOmegaInvQ_3D to initialize the array _dtOmegaInvQ
 * - wavefield arrays are swapped in this call:
 *     pCur -> pOld
 *     pOld -> pCur
 *   (this isotropic propagator carries no m wavefield)
 */
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void timeStep() {
applyFirstDerivatives3D_PlusHalf_Sandwich_Isotropic(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
_pCur, _pCur, _pCur, _b, _tmpPx1, _tmpPy1, _tmpPz1, _nbx, _nby, _nbz);
applyFirstDerivatives3D_MinusHalf_TimeUpdate_Nonlinear_Isotropic(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz, _dt,
_tmpPx1, _tmpPy1, _tmpPz1, _v, _b, _dtOmegaInvQ, _pCur, _pSpace, _pOld, _nbx, _nby, _nbz);
// swap pointers
float *pswap = _pOld;
_pOld = _pCur;
_pCur = pswap;
}
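/*
 * A minimal usage sketch (hedged: the model setup and the source-injection
 * line below are assumptions for illustration, not part of this class):
 *
 *   Prop3DAcoIsoDenQ_DEO2_FDTD prop(false, nthread, nx, ny, nz, nsponge,
 *       dx, dy, dz, dt, nbx, nby, nbz);
 *   // ... fill prop._v (velocity), prop._b (buoyancy), prop._dtOmegaInvQ ...
 *   for (long it = 0; it < nt; it++) {
 *       prop.timeStep();
 *       prop._pCur[sourceIndex] += sourceWavelet[it]; // hypothetical injection
 *   }
 */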
/**
* Scale spatial derivatives by v^2/b to make them temporal derivs
*/
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void scaleSpatialDerivatives() {
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const float v2OverB = _v[k] * _v[k] / _b[k];
_pSpace[k] *= v2OverB;
}
}
}
}
}
}
}
/**
* Add the Born source for velocity only model-space at the current time
*
* User must have:
* - called the nonlinear forward
* - saved 2nd time derivative of pressure at corresponding time index in array dp2
* - Born source term will be injected into the _pCur array
*/
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void forwardBornInjection_V(Type *dVel, Type *wavefieldDP) {
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const Type V = _v[k];
const Type B = _b[k];
const Type dV = dVel[k];
const Type dt2v2OverB = _dt * _dt * V * V / B;
const Type factorV = 2 * B * dV / (V * V * V);
_pCur[k] += dt2v2OverB * wavefieldDP[k] * factorV;
}
}
}
}
}
}
}
/**
* Add the Born source for buoyancy and velocity model-space at the current time
*
* User must have:
* - called the nonlinear forward
* - saved 2nd time derivative of pressure at corresponding time index in array dp2
* - Born source term will be injected into the _pCur array
*
* TODO: if this second derivative call and the following loop are expensive,
* consider fusing the two derivative loops with the final loop
*/
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void forwardBornInjection_VB(Type *dVel, Type *dBuoy, Type *wavefieldP, Type *wavefieldDP) {
applyFirstDerivatives3D_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
wavefieldP, wavefieldP, wavefieldP, _tmpPx1, _tmpPy1, _tmpPz1, _nbx, _nby, _nbz);
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const Type dB = dBuoy[k];
_tmpPx2[k] = dB * _tmpPx1[k];
_tmpPy2[k] = dB * _tmpPy1[k];
_tmpPz2[k] = dB * _tmpPz1[k];
}
}
}
}
}
}
applyFirstDerivatives3D_MinusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
_tmpPx2, _tmpPy2, _tmpPz2, _tmpPx1, _tmpPy1, _tmpPz1, _nbx, _nby, _nbz);
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const Type V = _v[k];
const Type B = _b[k];
const Type dV = dVel[k];
const Type dB = dBuoy[k];
const Type dt2v2OverB = _dt * _dt * V * V / B;
const Type factorV = 2 * B * dV / (V * V * V);
const Type factorB = - dB / (V * V);
_pCur[k] += dt2v2OverB * (wavefieldDP[k] * (factorV + factorB) + _tmpPx1[k] + _tmpPy1[k] + _tmpPz1[k]);
}
}
}
}
}
}
}
/**
* Add the Born source for buoyancy only model-space at the current time
*
* User must have:
* - called the nonlinear forward
* - saved 2nd time derivative of pressure at corresponding time index in array dp2
* - Born source term will be injected into the _pCur array
*
* TODO: if this second derivative call and the following loop are expensive,
* consider fusing the two derivative loops with the final loop
*/
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void forwardBornInjection_B(Type *dBuoy, Type *wavefieldP, Type *wavefieldDP) {
applyFirstDerivatives3D_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
wavefieldP, wavefieldP, wavefieldP, _tmpPx1, _tmpPy1, _tmpPz1, _nbx, _nby, _nbz);
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const Type dB = dBuoy[k];
_tmpPx2[k] = dB * _tmpPx1[k];
_tmpPy2[k] = dB * _tmpPy1[k];
_tmpPz2[k] = dB * _tmpPz1[k];
}
}
}
}
}
}
applyFirstDerivatives3D_MinusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
_tmpPx2, _tmpPy2, _tmpPz2, _tmpPx1, _tmpPy1, _tmpPz1, _nbx, _nby, _nbz);
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const Type V = _v[k];
const Type B = _b[k];
const Type dB = dBuoy[k];
const Type dt2v2OverB = _dt * _dt * V * V / B;
const Type factorB = - dB / (V * V);
_pCur[k] += dt2v2OverB * (wavefieldDP[k] * factorB + _tmpPx1[k] + _tmpPy1[k] + _tmpPz1[k]);
}
}
}
}
}
}
}
/**
* Accumulate the Born image term at the current time for velocity only model-space
*
* User must have:
* - called the nonlinear forward
 * - saved the 2nd time derivative of pressure at the corresponding time index (passed here as wavefieldDP)
 * - Born image term will be accumulated in the dVel array
*
* - velocity term: [+ 2B/V^3 LtP r ]
* - buoyancy term: [- 1/V^2 LtP r - dx' P dx - dy' P dy - dz' P dz]
*
* TODO: if these adjoint accumulations are expensive, could consider fusing the
* two derivative loops with the final loop
*/
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void adjointBornAccumulation_V(Type *dVel, Type *wavefieldDP) {
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const Type V = _v[k];
const Type B = _b[k];
const Type factorV = + 2 * B / (V * V * V);
dVel[k] += factorV * wavefieldDP[k] * _pOld[k];
}
}
}
}
}
}
}
/**
* Accumulate the Born image term at the current time for velocity and buoyancy model-space
*
* User must have:
* - called the nonlinear forward
 * - saved the 2nd time derivative of pressure at the corresponding time index (passed here as wavefieldDP)
 * - Born image term will be accumulated in the dVel and dBuoy arrays
*
* - velocity term: [+ 2B/V^3 LtP r ]
* - buoyancy term: [- 1/V^2 LtP r - dx' P dx - dy' P dy - dz' P dz]
*/
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void adjointBornAccumulation_VB(Type *dVel, Type *dBuoy, Type *wavefieldP, Type *wavefieldDP) {
applyFirstDerivatives3D_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
wavefieldP, wavefieldP, wavefieldP, _tmpPx1, _tmpPy1, _tmpPz1, _nbx, _nby, _nbz);
applyFirstDerivatives3D_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
_pOld, _pOld, _pOld, _tmpPx2, _tmpPy2, _tmpPz2, _nbx, _nby, _nbz);
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const Type V = _v[k];
const Type B = _b[k];
const Type factorV = + 2 * B / (V * V * V);
const Type factorB = - 1 / (V * V);
dVel[k] += factorV * wavefieldDP[k] * _pOld[k];
dBuoy[k] += factorB * wavefieldDP[k] * _pOld[k]
- _tmpPx1[k] * _tmpPx2[k] - _tmpPy1[k] * _tmpPy2[k] - _tmpPz1[k] * _tmpPz2[k];
}
}
}
}
}
}
}
/**
* Accumulate the Born image term at the current time for buoyancy only model-space
*
* User must have:
* - called the nonlinear forward
 * - saved the 2nd time derivative of pressure at the corresponding time index (passed here as wavefieldDP)
 * - Born image term will be accumulated in the dBuoy array
*
* - velocity term: [+ 2B/V^3 LtP r ]
* - buoyancy term: [- 1/V^2 LtP r - dx' P dx - dy' P dy - dz' P dz]
*/
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void adjointBornAccumulation_B(Type *dBuoy, Type *wavefieldP, Type *wavefieldDP) {
applyFirstDerivatives3D_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
wavefieldP, wavefieldP, wavefieldP, _tmpPx1, _tmpPy1, _tmpPz1, _nbx, _nby, _nbz);
applyFirstDerivatives3D_PlusHalf(
_freeSurface, _nx, _ny, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDy, _invDz,
_pOld, _pOld, _pOld, _tmpPx2, _tmpPy2, _tmpPz2, _nbx, _nby, _nbz);
#pragma omp parallel for collapse(3) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const Type V = _v[k];
const Type B = _b[k];
const Type factorB = - 1 / (V * V);
dBuoy[k] += factorB * wavefieldDP[k] * _pOld[k]
- _tmpPx1[k] * _tmpPx2[k] - _tmpPy1[k] * _tmpPy2[k] - _tmpPz1[k] * _tmpPz2[k];
}
}
}
}
}
}
}
/**
 * Apply Kz wavenumber filter for up/down wavefield separation
* Faqi, 2011, Geophysics https://library.seg.org/doi/full/10.1190/1.3533914
*
* We handle the FWI and RTM imaging conditions with a condition inside the OMP loop
*
* Example Kz filtering with 8 samples
* frequency | +0 | +1 | +2 | +3 | N | -3 | -2 | -1 |
* original | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* upgoing | 0 | X | X | X | 4 | 5 | 6 | 7 |
* dngoing | 0 | 1 | 2 | 3 | 4 | X | X | X |
*/
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void adjointBornAccumulation_wavefieldsep(float *dVel, float *wavefieldDP, const long isFWI) {
const long nfft = 2 * _nz;
const float scale = 1.0f / (float)(nfft);
// FWI: adj wavefield is dngoing
// RTM: adj wavefield is upgoing
const long kfft_adj = (isFWI) ? 0 : nfft / 2;
std::complex<float> * __restrict__ tmp = new std::complex<float>[nfft];
fftwf_plan planForward = fftwf_plan_dft_1d(nfft,
reinterpret_cast<fftwf_complex*>(tmp),
reinterpret_cast<fftwf_complex*>(tmp), +1, FFTW_ESTIMATE);
fftwf_plan planInverse = fftwf_plan_dft_1d(nfft,
reinterpret_cast<fftwf_complex*>(tmp),
reinterpret_cast<fftwf_complex*>(tmp), -1, FFTW_ESTIMATE);
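// Note: the plans are created once on a scratch buffer and applied to the
// per-thread buffers below with fftwf_execute_dft (FFTW's new-array execute
// interface), so the scratch buffer can be released immediately.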
delete [] tmp;
#pragma omp parallel num_threads(_nthread)
{
std::complex<float> * __restrict__ tmp_nlf = new std::complex<float>[nfft];
std::complex<float> * __restrict__ tmp_adj = new std::complex<float>[nfft];
#pragma omp for collapse(2) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long by = 0; by < _ny; by += _nby) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kymax = MIN(by + _nby, _ny);
for (long kx = bx; kx < kxmax; kx++) {
for (long ky = by; ky < kymax; ky++) {
#pragma omp simd
for (long kfft = 0; kfft < nfft; kfft++) {
tmp_nlf[kfft] = 0;
tmp_adj[kfft] = 0;
}
#pragma omp simd
for (long kz = 0; kz < _nz; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
tmp_nlf[kz] = scale * wavefieldDP[k];
tmp_adj[kz] = scale * _pOld[k];
}
fftwf_execute_dft(planForward,
reinterpret_cast<fftwf_complex*>(tmp_nlf),
reinterpret_cast<fftwf_complex*>(tmp_nlf));
fftwf_execute_dft(planForward,
reinterpret_cast<fftwf_complex*>(tmp_adj),
reinterpret_cast<fftwf_complex*>(tmp_adj));
// upgoing: zero the positive frequencies, excluding Nyquist
// dngoing: zero the negative frequencies, excluding Nyquist
#pragma omp simd
for (long k = 1; k < nfft / 2; k++) {
tmp_nlf[nfft / 2 + k] = 0;
tmp_adj[kfft_adj + k] = 0;
}
fftwf_execute_dft(planInverse,
reinterpret_cast<fftwf_complex*>(tmp_nlf),
reinterpret_cast<fftwf_complex*>(tmp_nlf));
fftwf_execute_dft(planInverse,
reinterpret_cast<fftwf_complex*>(tmp_adj),
reinterpret_cast<fftwf_complex*>(tmp_adj));
// Faqi eq 10
// Applied to FWI: [Sup * Rdn]
// Applied to RTM: [Sup * Rup]
for (long kz = 0; kz < _nz; kz++) {
const long k = kx * _ny * _nz + ky * _nz + kz;
const float V = _v[k];
const float B = _b[k];
const float factor = 2 * B / (V * V * V);
dVel[k] += factor * real(tmp_nlf[kz] * tmp_adj[kz]);
}
} // end loop over ky
} // end loop over kx
} // end loop over by
} // end loop over bx
delete [] tmp_nlf;
delete [] tmp_adj;
} // end parallel region
fftwf_destroy_plan(planForward);
fftwf_destroy_plan(planInverse);
}
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
static inline void applyFirstDerivatives3D_PlusHalf_Sandwich_Isotropic(
const long freeSurface,
const long nx,
const long ny,
const long nz,
const long nthread,
const Type c8_1,
const Type c8_2,
const Type c8_3,
const Type c8_4,
const Type invDx,
const Type invDy,
const Type invDz,
const Type * __restrict__ const inPX,
const Type * __restrict__ const inPY,
const Type * __restrict__ const inPZ,
const Type * __restrict__ const fieldBuoy,
Type * __restrict__ tmpPX,
Type * __restrict__ tmpPY,
Type * __restrict__ tmpPZ,
const long BX_3D,
const long BY_3D,
const long BZ_3D) {
const long nx4 = nx - 4;
const long ny4 = ny - 4;
const long nz4 = nz - 4;
const long nynz = ny * nz;
// zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed
for (long k = 0; k < 4; k++) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long ky = 0; ky < ny; ky++) {
const long kindex1 = kx * ny * nz + ky * nz + k;
const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k);
tmpPX[kindex1] = tmpPX[kindex2] = 0;
tmpPY[kindex1] = tmpPY[kindex2] = 0;
tmpPZ[kindex1] = tmpPZ[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = kx * ny * nz + k * nz + kz;
const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz;
tmpPX[kindex1] = tmpPX[kindex2] = 0;
tmpPY[kindex1] = tmpPY[kindex2] = 0;
tmpPZ[kindex1] = tmpPZ[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long ky = 0; ky < ny; ky++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = k * ny * nz + ky * nz + kz;
const long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz;
tmpPX[kindex1] = tmpPX[kindex2] = 0;
tmpPY[kindex1] = tmpPY[kindex2] = 0;
tmpPZ[kindex1] = tmpPZ[kindex2] = 0;
}
}
}
// interior
#pragma omp parallel for collapse(3) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_3D) {
for (long by = 4; by < ny4; by += BY_3D) {
for (long bz = 4; bz < nz4; bz += BZ_3D) {
const long kxmax = MIN(bx + BX_3D, nx4);
const long kymax = MIN(by + BY_3D, ny4);
const long kzmax = MIN(bz + BZ_3D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
const long kxnynz = kx * nynz;
for (long ky = by; ky < kymax; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kxnynz_kynz + kz;
const long kynz_kz = + kynz + kz;
const Type stencilDPx =
c8_1 * (- inPX[(kx+0) * nynz + kynz_kz] + inPX[(kx+1) * nynz + kynz_kz]) +
c8_2 * (- inPX[(kx-1) * nynz + kynz_kz] + inPX[(kx+2) * nynz + kynz_kz]) +
c8_3 * (- inPX[(kx-2) * nynz + kynz_kz] + inPX[(kx+3) * nynz + kynz_kz]) +
c8_4 * (- inPX[(kx-3) * nynz + kynz_kz] + inPX[(kx+4) * nynz + kynz_kz]);
const Type stencilDPy =
c8_1 * (- inPY[kxnynz + (ky+0) * nz + kz] + inPY[kxnynz + (ky+1) * nz + kz]) +
c8_2 * (- inPY[kxnynz + (ky-1) * nz + kz] + inPY[kxnynz + (ky+2) * nz + kz]) +
c8_3 * (- inPY[kxnynz + (ky-2) * nz + kz] + inPY[kxnynz + (ky+3) * nz + kz]) +
c8_4 * (- inPY[kxnynz + (ky-3) * nz + kz] + inPY[kxnynz + (ky+4) * nz + kz]);
const Type stencilDPz =
c8_1 * (- inPZ[kxnynz_kynz + (kz+0)] + inPZ[kxnynz_kynz + (kz+1)]) +
c8_2 * (- inPZ[kxnynz_kynz + (kz-1)] + inPZ[kxnynz_kynz + (kz+2)]) +
c8_3 * (- inPZ[kxnynz_kynz + (kz-2)] + inPZ[kxnynz_kynz + (kz+3)]) +
c8_4 * (- inPZ[kxnynz_kynz + (kz-3)] + inPZ[kxnynz_kynz + (kz+4)]);
const Type dPx = invDx * stencilDPx;
const Type dPy = invDy * stencilDPy;
const Type dPz = invDz * stencilDPz;
const Type B = fieldBuoy[k];
tmpPX[k] = B * dPx;
tmpPY[k] = B * dPy;
tmpPZ[k] = B * dPz;
}
}
}
}
}
}
// roll on free surface
if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 4; kx < nx4; kx++) {
const long kxnynz = kx * nynz;
#pragma omp simd
for (long ky = 4; ky < ny4; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
// kz = 0 -- 1/2 cells below free surface for Z derivative, at free surface for X/Y derivative
// X and Y derivatives are identically zero
// [kxnynz_kynz + 0]
{
const Type stencilDPz0 =
c8_1 * (- inPZ[kxnynz_kynz + 0] + inPZ[kxnynz_kynz + 1]) +
c8_2 * (+ inPZ[kxnynz_kynz + 1] + inPZ[kxnynz_kynz + 2]) +
c8_3 * (+ inPZ[kxnynz_kynz + 2] + inPZ[kxnynz_kynz + 3]) +
c8_4 * (+ inPZ[kxnynz_kynz + 3] + inPZ[kxnynz_kynz + 4]);
const Type dPx = 0;
const Type dPy = 0;
const Type dPz = invDz * stencilDPz0;
const long k = kxnynz_kynz + 0;
const Type B = fieldBuoy[k];
tmpPX[k] = B * dPx;
tmpPY[k] = B * dPy;
tmpPZ[k] = B * dPz;
}
// kz = 1 -- 1 1/2 cells below free surface for Z derivative, 1 cell below for X/Y derivative
// [kxnynz_kynz + 1]
{
const Type stencilDPx1 =
c8_1 * (- inPX[(kx+0) * nynz + kynz + 1] + inPX[(kx+1) * nynz + kynz + 1]) +
c8_2 * (- inPX[(kx-1) * nynz + kynz + 1] + inPX[(kx+2) * nynz + kynz + 1]) +
c8_3 * (- inPX[(kx-2) * nynz + kynz + 1] + inPX[(kx+3) * nynz + kynz + 1]) +
c8_4 * (- inPX[(kx-3) * nynz + kynz + 1] + inPX[(kx+4) * nynz + kynz + 1]);
const Type stencilDPy1 =
c8_1 * (- inPY[kxnynz + (ky+0) * nz + 1] + inPY[kxnynz + (ky+1) * nz + 1]) +
c8_2 * (- inPY[kxnynz + (ky-1) * nz + 1] + inPY[kxnynz + (ky+2) * nz + 1]) +
c8_3 * (- inPY[kxnynz + (ky-2) * nz + 1] + inPY[kxnynz + (ky+3) * nz + 1]) +
c8_4 * (- inPY[kxnynz + (ky-3) * nz + 1] + inPY[kxnynz + (ky+4) * nz + 1]);
const Type stencilDPz1 =
c8_1 * (- inPZ[kxnynz_kynz + 1] + inPZ[kxnynz_kynz + 2]) +
c8_2 * (- inPZ[kxnynz_kynz + 0] + inPZ[kxnynz_kynz + 3]) +
c8_3 * (+ inPZ[kxnynz_kynz + 1] + inPZ[kxnynz_kynz + 4]) +
c8_4 * (+ inPZ[kxnynz_kynz + 2] + inPZ[kxnynz_kynz + 5]);
const Type dPx = invDx * stencilDPx1;
const Type dPy = invDy * stencilDPy1;
const Type dPz = invDz * stencilDPz1;
const long k = kxnynz_kynz + 1;
const Type B = fieldBuoy[k];
tmpPX[k] = B * dPx;
tmpPY[k] = B * dPy;
tmpPZ[k] = B * dPz;
}
// kz = 2 -- 2 1/2 cells below free surface for Z derivative, 2 cells below for X/Y derivative
// [kxnynz_kynz + 2]
{
const Type stencilDPx2 =
c8_1 * (- inPX[(kx+0) * nynz + kynz + 2] + inPX[(kx+1) * nynz + kynz + 2]) +
c8_2 * (- inPX[(kx-1) * nynz + kynz + 2] + inPX[(kx+2) * nynz + kynz + 2]) +
c8_3 * (- inPX[(kx-2) * nynz + kynz + 2] + inPX[(kx+3) * nynz + kynz + 2]) +
c8_4 * (- inPX[(kx-3) * nynz + kynz + 2] + inPX[(kx+4) * nynz + kynz + 2]);
const Type stencilDPy2 =
c8_1 * (- inPY[kxnynz + (ky+0) * nz + 2] + inPY[kxnynz + (ky+1) * nz + 2]) +
c8_2 * (- inPY[kxnynz + (ky-1) * nz + 2] + inPY[kxnynz + (ky+2) * nz + 2]) +
c8_3 * (- inPY[kxnynz + (ky-2) * nz + 2] + inPY[kxnynz + (ky+3) * nz + 2]) +
c8_4 * (- inPY[kxnynz + (ky-3) * nz + 2] + inPY[kxnynz + (ky+4) * nz + 2]);
const Type stencilDPz2 =
c8_1 * (- inPZ[kxnynz_kynz + 2] + inPZ[kxnynz_kynz + 3]) +
c8_2 * (- inPZ[kxnynz_kynz + 1] + inPZ[kxnynz_kynz + 4]) +
c8_3 * (- inPZ[kxnynz_kynz + 0] + inPZ[kxnynz_kynz + 5]) +
c8_4 * (+ inPZ[kxnynz_kynz + 1] + inPZ[kxnynz_kynz + 6]);
const Type dPx = invDx * stencilDPx2;
const Type dPy = invDy * stencilDPy2;
const Type dPz = invDz * stencilDPz2;
const long k = kxnynz_kynz + 2;
const Type B = fieldBuoy[k];
tmpPX[k] = B * dPx;
tmpPY[k] = B * dPy;
tmpPZ[k] = B * dPz;
}
// kz = 3 -- 3 1/2 cells below free surface for Z derivative, 3 cells below for X/Y derivative
// [kxnynz_kynz + 3]
{
const Type stencilDPx3 =
c8_1 * (- inPX[(kx+0) * nynz + kynz + 3] + inPX[(kx+1) * nynz + kynz + 3]) +
c8_2 * (- inPX[(kx-1) * nynz + kynz + 3] + inPX[(kx+2) * nynz + kynz + 3]) +
c8_3 * (- inPX[(kx-2) * nynz + kynz + 3] + inPX[(kx+3) * nynz + kynz + 3]) +
c8_4 * (- inPX[(kx-3) * nynz + kynz + 3] + inPX[(kx+4) * nynz + kynz + 3]);
const Type stencilDPy3 =
c8_1 * (- inPY[kxnynz + (ky+0) * nz + 3] + inPY[kxnynz + (ky+1) * nz + 3]) +
c8_2 * (- inPY[kxnynz + (ky-1) * nz + 3] + inPY[kxnynz + (ky+2) * nz + 3]) +
c8_3 * (- inPY[kxnynz + (ky-2) * nz + 3] + inPY[kxnynz + (ky+3) * nz + 3]) +
c8_4 * (- inPY[kxnynz + (ky-3) * nz + 3] + inPY[kxnynz + (ky+4) * nz + 3]);
const Type stencilDPz3 =
c8_1 * (- inPZ[kxnynz_kynz + 3] + inPZ[kxnynz_kynz + 4]) +
c8_2 * (- inPZ[kxnynz_kynz + 2] + inPZ[kxnynz_kynz + 5]) +
c8_3 * (- inPZ[kxnynz_kynz + 1] + inPZ[kxnynz_kynz + 6]) +
c8_4 * (- inPZ[kxnynz_kynz + 0] + inPZ[kxnynz_kynz + 7]);
const Type dPx = invDx * stencilDPx3;
const Type dPy = invDy * stencilDPy3;
const Type dPz = invDz * stencilDPz3;
const long k = kxnynz_kynz + 3;
const Type B = fieldBuoy[k];
tmpPX[k] = B * dPx;
tmpPY[k] = B * dPy;
tmpPZ[k] = B * dPz;
}
}
}
}
}
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
static inline void applyFirstDerivatives3D_MinusHalf_TimeUpdate_Nonlinear_Isotropic(
const long freeSurface,
const long nx,
const long ny,
const long nz,
const long nthread,
const Type c8_1,
const Type c8_2,
const Type c8_3,
const Type c8_4,
const Type invDx,
const Type invDy,
const Type invDz,
const Type dtMod,
const Type * __restrict__ const tmpPX,
const Type * __restrict__ const tmpPY,
const Type * __restrict__ const tmpPZ,
const Type * __restrict__ const fieldVel,
const Type * __restrict__ const fieldBuoy,
const Type * __restrict__ const dtOmegaInvQ,
const Type * __restrict__ const pCur,
Type * __restrict__ tmpPout,
Type * __restrict__ pOld,
const long BX_3D,
const long BY_3D,
const long BZ_3D) {
const long nx4 = nx - 4;
const long ny4 = ny - 4;
const long nz4 = nz - 4;
const long nynz = ny * nz;
const Type dt2 = dtMod * dtMod;
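// Time update applied in the loops below (pOld holds p(t-dt) on entry and
// receives p(t+dt)):
//   p(t+dt) = 2 p(t) - p(t-dt) + dt^2 V^2/B * div(B grad p) - dtOmegaInvQ * (p(t) - p(t-dt))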
// zero output array: note only the annulus that is in the absorbing boundary needs to be zeroed
for (long k = 0; k < 4; k++) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long ky = 0; ky < ny; ky++) {
const long kindex1 = kx * ny * nz + ky * nz + k;
const long kindex2 = kx * ny * nz + ky * nz + (nz - 1 - k);
tmpPout[kindex1] = tmpPout[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = kx * ny * nz + k * nz + kz;
const long kindex2 = kx * ny * nz + (ny - 1 - k) * nz + kz;
tmpPout[kindex1] = tmpPout[kindex2] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long ky = 0; ky < ny; ky++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long kindex1 = k * ny * nz + ky * nz + kz;
const long kindex2 = (nx - 1 - k) * ny * nz + ky * nz + kz;
tmpPout[kindex1] = tmpPout[kindex2] = 0;
}
}
}
// interior
#pragma omp parallel for collapse(3) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_3D) {
for (long by = 4; by < ny4; by += BY_3D) {
for (long bz = 4; bz < nz4; bz += BZ_3D) {
const long kxmax = MIN(bx + BX_3D, nx4);
const long kymax = MIN(by + BY_3D, ny4);
const long kzmax = MIN(bz + BZ_3D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
const long kxnynz = kx * nynz;
for (long ky = by; ky < kymax; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kxnynz_kynz + kz;
const long kynz_kz = + kynz + kz;
const Type stencilDPx =
c8_1 * (- tmpPX[(kx-1) * nynz + kynz_kz] + tmpPX[(kx+0) * nynz + kynz_kz]) +
c8_2 * (- tmpPX[(kx-2) * nynz + kynz_kz] + tmpPX[(kx+1) * nynz + kynz_kz]) +
c8_3 * (- tmpPX[(kx-3) * nynz + kynz_kz] + tmpPX[(kx+2) * nynz + kynz_kz]) +
c8_4 * (- tmpPX[(kx-4) * nynz + kynz_kz] + tmpPX[(kx+3) * nynz + kynz_kz]);
const Type stencilDPy =
c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + kz] + tmpPY[kxnynz + (ky+0) * nz + kz]) +
c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + kz] + tmpPY[kxnynz + (ky+1) * nz + kz]) +
c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + kz] + tmpPY[kxnynz + (ky+2) * nz + kz]) +
c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + kz] + tmpPY[kxnynz + (ky+3) * nz + kz]);
const Type stencilDPz =
c8_1 * (- tmpPZ[kxnynz_kynz + (kz-1)] + tmpPZ[kxnynz_kynz + (kz+0)]) +
c8_2 * (- tmpPZ[kxnynz_kynz + (kz-2)] + tmpPZ[kxnynz_kynz + (kz+1)]) +
c8_3 * (- tmpPZ[kxnynz_kynz + (kz-3)] + tmpPZ[kxnynz_kynz + (kz+2)]) +
c8_4 * (- tmpPZ[kxnynz_kynz + (kz-4)] + tmpPZ[kxnynz_kynz + (kz+3)]);
const Type dPx = invDx * stencilDPx;
const Type dPy = invDy * stencilDPy;
const Type dPz = invDz * stencilDPz;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pOld[k] = dt2V2_B * (dPx + dPy + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
tmpPout[k] = dPx + dPy + dPz;
}
}
}
}
}
}
// roll on free surface
if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 4; kx < nx4; kx++) {
const long kxnynz = kx * nynz;
#pragma omp simd
for (long ky = 4; ky < ny4; ky++) {
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
// kz = 0 -- at the free surface -- p = 0
// [kxnynz_kynz + 0]
{
const Type dPx = 0;
const Type dPy = 0;
const Type dPz = 0;
const long k = kxnynz_kynz + 0;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pOld[k] = dt2V2_B * (dPx + dPy + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
tmpPout[k] = dPx + dPy + dPz;
}
// kz = 1 -- one cell below the free surface
// [kxnynz_kynz + 1]
{
const Type stencilDPx1 =
c8_1 * (- tmpPX[(kx-1) * nynz + kynz + 1] + tmpPX[(kx+0) * nynz + kynz + 1]) +
c8_2 * (- tmpPX[(kx-2) * nynz + kynz + 1] + tmpPX[(kx+1) * nynz + kynz + 1]) +
c8_3 * (- tmpPX[(kx-3) * nynz + kynz + 1] + tmpPX[(kx+2) * nynz + kynz + 1]) +
c8_4 * (- tmpPX[(kx-4) * nynz + kynz + 1] + tmpPX[(kx+3) * nynz + kynz + 1]);
const Type stencilDPy1 =
c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + 1] + tmpPY[kxnynz + (ky+0) * nz + 1]) +
c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + 1] + tmpPY[kxnynz + (ky+1) * nz + 1]) +
c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + 1] + tmpPY[kxnynz + (ky+2) * nz + 1]) +
c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + 1] + tmpPY[kxnynz + (ky+3) * nz + 1]);
const Type stencilDPz1 =
c8_1 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 1]) +
c8_2 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 2]) +
c8_3 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 3]) +
c8_4 * (- tmpPZ[kxnynz_kynz + 2] + tmpPZ[kxnynz_kynz + 4]);
const Type dPx = invDx * stencilDPx1;
const Type dPy = invDy * stencilDPy1;
const Type dPz = invDz * stencilDPz1;
const long k = kxnynz_kynz + 1;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pOld[k] = dt2V2_B * (dPx + dPy + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
tmpPout[k] = dPx + dPy + dPz;
}
// kz = 2 -- two cells below the free surface
// [kxnynz_kynz + 2]
{
const Type stencilDPx2 =
c8_1 * (- tmpPX[(kx-1) * nynz + kynz + 2] + tmpPX[(kx+0) * nynz + kynz + 2]) +
c8_2 * (- tmpPX[(kx-2) * nynz + kynz + 2] + tmpPX[(kx+1) * nynz + kynz + 2]) +
c8_3 * (- tmpPX[(kx-3) * nynz + kynz + 2] + tmpPX[(kx+2) * nynz + kynz + 2]) +
c8_4 * (- tmpPX[(kx-4) * nynz + kynz + 2] + tmpPX[(kx+3) * nynz + kynz + 2]);
const Type stencilDPy2 =
c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + 2] + tmpPY[kxnynz + (ky+0) * nz + 2]) +
c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + 2] + tmpPY[kxnynz + (ky+1) * nz + 2]) +
c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + 2] + tmpPY[kxnynz + (ky+2) * nz + 2]) +
c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + 2] + tmpPY[kxnynz + (ky+3) * nz + 2]);
const Type stencilDPz2 =
c8_1 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 2]) +
c8_2 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 3]) +
c8_3 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 4]) +
c8_4 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 5]);
const Type dPx = invDx * stencilDPx2;
const Type dPy = invDy * stencilDPy2;
const Type dPz = invDz * stencilDPz2;
const long k = kxnynz_kynz + 2;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pOld[k] = dt2V2_B * (dPx + dPy + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
tmpPout[k] = dPx + dPy + dPz;
}
// kz = 3 -- three cells below the free surface
// [kxnynz_kynz + 3]
{
const Type stencilDPx3 =
c8_1 * (- tmpPX[(kx-1) * nynz + kynz + 3] + tmpPX[(kx+0) * nynz + kynz + 3]) +
c8_2 * (- tmpPX[(kx-2) * nynz + kynz + 3] + tmpPX[(kx+1) * nynz + kynz + 3]) +
c8_3 * (- tmpPX[(kx-3) * nynz + kynz + 3] + tmpPX[(kx+2) * nynz + kynz + 3]) +
c8_4 * (- tmpPX[(kx-4) * nynz + kynz + 3] + tmpPX[(kx+3) * nynz + kynz + 3]);
const Type stencilDPy3 =
c8_1 * (- tmpPY[kxnynz + (ky-1) * nz + 3] + tmpPY[kxnynz + (ky+0) * nz + 3]) +
c8_2 * (- tmpPY[kxnynz + (ky-2) * nz + 3] + tmpPY[kxnynz + (ky+1) * nz + 3]) +
c8_3 * (- tmpPY[kxnynz + (ky-3) * nz + 3] + tmpPY[kxnynz + (ky+2) * nz + 3]) +
c8_4 * (- tmpPY[kxnynz + (ky-4) * nz + 3] + tmpPY[kxnynz + (ky+3) * nz + 3]);
const Type stencilDPz3 =
c8_1 * (- tmpPZ[kxnynz_kynz + 2] + tmpPZ[kxnynz_kynz + 3]) +
c8_2 * (- tmpPZ[kxnynz_kynz + 1] + tmpPZ[kxnynz_kynz + 4]) +
c8_3 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 5]) +
c8_4 * (- tmpPZ[kxnynz_kynz + 0] + tmpPZ[kxnynz_kynz + 6]);
const Type dPx = invDx * stencilDPx3;
const Type dPy = invDy * stencilDPy3;
const Type dPz = invDz * stencilDPz3;
const long k = kxnynz_kynz + 3;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pOld[k] = dt2V2_B * (dPx + dPy + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
tmpPout[k] = dPx + dPy + dPz;
}
}
}
}
}
};
#endif
|
rose_heat_serial_OpenMP.c | #include <omp.h>
/* This code is contributed by Richard T. Evans at the Texas Advanced Computing Center,
 * The University of Texas at Austin
 *
 * To compile (this OpenMP variant needs the OpenMP flag): icc -qopenmp -o heat heat_serial.c calc_up.c
 */
#include <stdio.h>
#include <sys/time.h>
#include "calc_up.h"
int main()
{
int Nx;
int Ny;
int Nt;
int t;
int x;
int y;
Nx = 1000;
Ny = 1000;
Nt = 1000;
double u[Nx][Ny];
double up[Nx][Ny];
struct timeval start;
struct timeval end;
float delta;
// Boundary conditions
for (x = 0; x < Nx; x++)
for (y = 0; y < Ny; y++) {
if (x == 0)
u[x][y] = 1.0;
else
u[x][y] = 0.0;
}
gettimeofday(&start,0);
////////////////////////////////////////////////////////////////////////
// Finite difference algorithm - iterate over time to reach steady state
////////////////////////////////////////////////////////////////////////
for (t = 0; t < Nt; t++) {
#pragma omp parallel default(none) shared(u,up,Nx,Ny) private(x,y)
{
#pragma omp for
for (x = 1; x < (Nx - 1); x++)
for (y = 1; y < (Ny - 1); y++)
calc_up(x,y,Nx,Ny,u,up);
}
#pragma omp parallel default(none) shared(u,up,Nx,Ny) private(x,y)
{
#pragma omp for
for (x = 1; x < (Nx - 1); x++)
for (y = 1; y < (Ny - 1); y++)
u[x][y] = up[x][y];
}
}
gettimeofday(&end,0);
delta = (((((end.tv_sec - start.tv_sec) * 1000000u) + end.tv_usec) - start.tv_usec) / 1.e6);
double sum = 0;
for (y = 0; y < Ny; y++) {
for (x = 0; x < Nx; x++) {
sum += u[x][y];
}
}
printf("run time = %fs\n",delta);
printf("sum of u = %f\n",sum);
return 0;
}
|
lis_precon_is.c | /* Copyright (C) 2002-2012 The SSI Project. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the project nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE
PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"
#undef __FUNC__
#define __FUNC__ "lis_precon_create_is"
LIS_INT lis_precon_create_is(LIS_SOLVER solver, LIS_PRECON precon)
{
LIS_INT err;
LIS_INT k,nsol;
LIS_MATRIX A,B;
LIS_DEBUG_FUNC_IN;
k = solver->options[LIS_OPTIONS_ISLEVEL];
nsol = solver->options[LIS_OPTIONS_SOLVER];
if( k!=0 && (nsol<LIS_SOLVER_JACOBI || nsol>LIS_SOLVER_SOR) )
{
lis_psolve_xxx[LIS_PRECON_TYPE_IS] = lis_psolve_is;
lis_psolvet_xxx[LIS_PRECON_TYPE_IS] = lis_psolvet_is;
if( solver->A->matrix_type!=LIS_MATRIX_CRS )
{
A = solver->A;
err = lis_matrix_duplicate(A,&B);
if( err ) return err;
lis_matrix_set_type(B,LIS_MATRIX_CRS);
err = lis_matrix_convert(A,B);
if( err ) return err;
lis_matrix_storage_destroy(A);
lis_matrix_DLU_destroy(A);
lis_matrix_diag_destroy(A->WD);
if( A->l2g_map ) lis_free( A->l2g_map );
if( A->commtable ) lis_commtable_destroy( A->commtable );
if( A->ranges ) lis_free( A->ranges );
err = lis_matrix_copy_struct(B,A);
if( err ) return err;
lis_free(B);
}
err = lis_matrix_split(solver->A);
if( err )
{
return err;
}
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
}
else
{
lis_psolve_xxx[LIS_PRECON_TYPE_IS] = lis_psolve_none;
lis_psolvet_xxx[LIS_PRECON_TYPE_IS] = lis_psolvet_none;
}
switch( solver->A->matrix_type )
{
case LIS_MATRIX_CRS:
err = lis_matrix_split(solver->A);
if( err )
{
return err;
}
err = lis_precon_create_is_crs(solver,precon);
break;
default:
A = solver->A;
err = lis_matrix_duplicate(A,&B);
if( err ) return err;
lis_matrix_set_type(B,LIS_MATRIX_CRS);
err = lis_matrix_convert(A,B);
if( err ) return err;
lis_matrix_storage_destroy(A);
lis_matrix_DLU_destroy(A);
lis_matrix_diag_destroy(A->WD);
if( A->l2g_map ) lis_free( A->l2g_map );
if( A->commtable ) lis_commtable_destroy( A->commtable );
if( A->ranges ) lis_free( A->ranges );
err = lis_matrix_copy_struct(B,A);
if( err ) return err;
lis_free(B);
err = lis_matrix_split(solver->A);
if( err )
{
return err;
}
err = lis_precon_create_is_crs(solver,precon);
break;
}
/*
err = lis_matrix_diag_duplicate(precon->A->D,&precon->A->WD);
if( err ) return err;
lis_matrix_diag_copy(precon->A->D,precon->A->WD);
*/
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_precon_create_is_crs"
LIS_INT lis_precon_create_is_crs(LIS_SOLVER solver, LIS_PRECON precon)
{
LIS_INT i,j,k,m;
LIS_INT n,gn,ja,jj,jb,jcol,jpos,err;
LIS_INT nnzl,nnzu,kl,ku;
LIS_INT *iw,*iw2;
LIS_INT *lptr,*lindex,*uptr,*uindex;
LIS_INT n2;
LIS_SCALAR val,t;
LIS_SCALAR w;
LIS_SCALAR *lvalue,*uvalue,*diag;
LIS_MATRIX A,P;
LIS_VECTOR b,pb;
LIS_Comm comm;
LIS_DEBUG_FUNC_IN;
A = solver->A;
n = A->n;
gn = A->gn;
w = solver->params[LIS_PARAMS_ALPHA-LIS_OPTIONS_LEN];
m = solver->options[LIS_OPTIONS_M] + 1;
comm = A->comm;
b = solver->b;
err = lis_matrix_create(comm,&P);
err = lis_matrix_set_size(P,n,0);
err = lis_matrix_diag_mallocM(A,&diag);
err = lis_vector_duplicate(b,&pb);
lptr = (LIS_INT *)lis_malloc((n+1)*sizeof(LIS_INT),"lis_precon_create_is_crs::lptr");
uptr = (LIS_INT *)lis_malloc((n+1)*sizeof(LIS_INT),"lis_precon_create_is_crs::uptr");
iw = (LIS_INT *)lis_malloc( 2*gn*sizeof(LIS_INT),"lis_precon_create_is_crs::iw" );
memset( iw,0,gn*sizeof(LIS_INT) );
iw2 = iw + gn;
/*
* C <- (I+S)A
*/
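/*
 * First (symbolic) pass: for each row i, mark the column pattern of row i of
 * (I+S)A using the work arrays (iw maps column -> position+1, iw2 records the
 * touched columns for reset) and count the strictly-lower (nnzl) and
 * strictly-upper (nnzu) nonzeros; the second pass further below fills values.
 */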
for(i=0;i<n;i++)
{
k = 0;
nnzl = 0;
nnzu = 0;
n2 = _min(A->U->ptr[i]+m,A->U->ptr[i+1]);
/*
* I*A
*/
for(jb=A->L->ptr[i];jb<A->L->ptr[i+1];jb++)
{
jcol = A->L->index[jb];
iw2[k++] = jcol;
iw[jcol] = k;
nnzl++;
}
for(jb=A->U->ptr[i];jb<A->U->ptr[i+1];jb++)
{
jcol = A->U->index[jb];
iw2[k++] = jcol;
iw[jcol] = k;
nnzu++;
}
/*
* S*A
*/
for(ja=A->U->ptr[i];ja<n2;ja++)
{
jj = A->U->index[ja];
#ifdef USE_MPI
if( jj>=n ) break;
#endif
for(jb=A->L->ptr[jj];jb<A->L->ptr[jj+1];jb++)
{
jcol = A->L->index[jb];
jpos = iw[jcol];
if( jpos==0 )
{
iw2[k++] = jcol;
iw[jcol] = k;
if( jcol<i )
{
nnzl++;
}
else if( jcol>i )
{
nnzu++;
}
}
}
jpos = iw[jj];
if( jpos==0 )
{
iw2[k++] = jj;
iw[jj] = k;
nnzu++;
}
for(jb=A->U->ptr[jj];jb<A->U->ptr[jj+1];jb++)
{
jcol = A->U->index[jb];
jpos = iw[jcol];
if( jpos==0 )
{
iw2[k++] = jcol;
iw[jcol] = k;
nnzu++;
}
}
}
lptr[i+1] = nnzl;
uptr[i+1] = nnzu;
for(j=0;j<k;j++)
{
iw[iw2[j]] = 0;
}
}
lptr[0] = 0;
uptr[0] = 0;
for(i=0;i<n;i++)
{
lptr[i+1] += lptr[i];
uptr[i+1] += uptr[i];
}
nnzl = lptr[n];
nnzu = uptr[n];
lindex = (LIS_INT *)lis_malloc(nnzl*sizeof(LIS_INT),"lis_precon_create_is_crs::lindex");
lvalue = (LIS_SCALAR *)lis_malloc(nnzl*sizeof(LIS_SCALAR),"lis_precon_create_is_crs::lvalue");
uindex = (LIS_INT *)lis_malloc(nnzu*sizeof(LIS_INT),"lis_precon_create_is_crs::uindex");
uvalue = (LIS_SCALAR *)lis_malloc(nnzu*sizeof(LIS_SCALAR),"lis_precon_create_is_crs::uvalue");
memset( iw,0,gn*sizeof(LIS_INT) );
/*
* C <- (I+S)A
*/
for(i=0;i<n;i++) diag[i] = A->D->value[i];
for(i=0;i<n;i++)
{
kl = lptr[i];
ku = uptr[i];
n2 = _min(A->U->ptr[i]+m,A->U->ptr[i+1]);
/*
* I*A
*/
val = A->D->value[i];
t = val * b->value[i];
for(jb=A->L->ptr[i];jb<A->L->ptr[i+1];jb++)
{
jcol = A->L->index[jb];
lindex[kl] = jcol;
lvalue[kl++] = val * A->L->value[jb];
iw[jcol] = kl;
}
for(jb=A->U->ptr[i];jb<A->U->ptr[i+1];jb++)
{
jcol = A->U->index[jb];
uindex[ku] = jcol;
uvalue[ku++] = val * A->U->value[jb];
iw[jcol] = ku;
}
/*
* S*A
*/
for(ja=A->U->ptr[i];ja<n2;ja++)
{
jj = A->U->index[ja];
#ifdef USE_MPI
if( jj>=n ) break;
#endif
val = -w * A->U->value[ja];
t += val * b->value[jj];
for(jb=A->L->ptr[jj];jb<A->L->ptr[jj+1];jb++)
{
jcol = A->L->index[jb];
if( jcol<i )
{
jpos = iw[jcol];
if( jpos==0 )
{
lindex[kl] = jcol;
lvalue[kl++] = val * A->L->value[jb];
iw[jcol] = kl;
}
else
{
lvalue[jpos-1] += val * A->L->value[jb];
}
}
else if( jcol>i )
{
jpos = iw[jcol];
if( jpos==0 )
{
uindex[ku] = jcol;
uvalue[ku++] = val * A->L->value[jb];
iw[jcol] = ku;
}
else
{
uvalue[jpos-1] += val * A->L->value[jb];
}
}
else
{
diag[i] += val * A->L->value[jb];
}
}
jpos = iw[jj];
if( jpos==0 )
{
uindex[ku] = jj;
uvalue[ku++] = val * A->D->value[jj];
iw[jj] = ku;
}
else
{
uvalue[jpos-1] += val * A->D->value[jj];
}
for(jb=A->U->ptr[jj];jb<A->U->ptr[jj+1];jb++)
{
jcol = A->U->index[jb];
jpos = iw[jcol];
if( jpos==0 )
{
uindex[ku] = jcol;
uvalue[ku++] = val * A->U->value[jb];
iw[jcol] = ku;
}
else
{
uvalue[jpos-1] += val * A->U->value[jb];
}
}
}
for(j=lptr[i];j<kl;j++)
{
iw[lindex[j]] = 0;
}
for(j=uptr[i];j<ku;j++)
{
iw[uindex[j]] = 0;
}
pb->value[i] = t;
}
lis_matrix_setDLU_crs(lptr[n],uptr[n],diag,lptr,lindex,lvalue,uptr,uindex,uvalue,P);
lis_matrix_merge_crs(P);
lis_matrix_assemble(P);
precon->A = P;
precon->Pb = pb;
precon->is_copy = LIS_TRUE;
lis_free(iw);
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_psolve_is"
LIS_INT lis_psolve_is(LIS_SOLVER solver, LIS_VECTOR X, LIS_VECTOR Y)
{
LIS_MATRIX A;
LIS_INT i,j,jj,n,m;
LIS_SCALAR t;
LIS_SCALAR w;
LIS_SCALAR *y,*x;
/*
 * y = M^{-1}x
 * M^{-1} = (I+S), where S = -w * (at most the first m entries per row of the strict upper part of A)
 */
LIS_DEBUG_FUNC_IN;
A = solver->A;
n = A->n;
w = solver->params[LIS_PARAMS_ALPHA-LIS_OPTIONS_LEN];
m = solver->options[LIS_OPTIONS_M] + 1;
y = Y->value;
x = X->value;
#ifdef USE_MPI
LIS_MATVEC_SENDRECV;
#endif
#ifdef _OPENMP
#pragma omp parallel private(i,jj,j,t)
#endif
{
#ifdef _OPENMP
#pragma omp for
#endif
for(i=0;i<n;i++)
{
t = 0.0;
for(j=A->U->ptr[i];j<_min(A->U->ptr[i]+m,A->U->ptr[i+1]);j++)
{
jj = A->U->index[j];
t += A->U->value[j] * x[jj];
}
y[i] = x[i] - w*t;
}
}
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_psolvet_is"
LIS_INT lis_psolvet_is(LIS_SOLVER solver, LIS_VECTOR X, LIS_VECTOR Y)
{
LIS_MATRIX A;
LIS_INT i,j,jj,n,m,np;
LIS_SCALAR t;
LIS_SCALAR w;
LIS_SCALAR *y,*x;
#ifdef _OPENMP
LIS_INT k,nprocs;
LIS_SCALAR *tmp;
#endif
/*
 * y = M^{-T}x (transposed apply of lis_psolve_is above)
 * M^{-1} = (I+S)
 */
LIS_DEBUG_FUNC_IN;
A = solver->A;
n = A->n;
np = A->np;
w = solver->params[LIS_PARAMS_ALPHA-LIS_OPTIONS_LEN];
m = solver->options[LIS_OPTIONS_M] + 1;
y = Y->value;
x = X->value;
#ifdef _OPENMP
nprocs = omp_get_max_threads();
tmp = (LIS_SCALAR *)lis_malloc( nprocs*np*sizeof(LIS_SCALAR),"lis_psolvet_is::tmp" );
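/* The transposed apply scatters into arbitrary rows jj, so each thread
   accumulates into its own np-length slice of tmp; the slices are then
   summed into y to avoid write races. */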
#pragma omp parallel private(i,j,t,jj,k)
{
k = omp_get_thread_num();
#pragma omp for
for(j=0;j<nprocs;j++)
{
memset( &tmp[j*np], 0, np*sizeof(LIS_SCALAR) );
}
#pragma omp for
for(i=0; i<n; i++)
{
t = x[i];
for(j=A->U->ptr[i];j<_min(A->U->ptr[i]+m,A->U->ptr[i+1]);j++)
{
jj = k*np+A->U->index[j];
tmp[jj] += w*A->U->value[j] * t;
}
}
#pragma omp for
for(i=0;i<np;i++)
{
t = 0.0;
for(j=0;j<nprocs;j++)
{
t += tmp[j*np+i];
}
y[i] = x[i] - t;
}
}
lis_free(tmp);
#else
for(i=0; i<np; i++)
{
y[i] = x[i];
}
for(i=0; i<n; i++)
{
t = x[i];
for(j=A->U->ptr[i];j<_min(A->U->ptr[i]+m,A->U->ptr[i+1]);j++)
{
jj = A->U->index[j];
y[jj] -= w*A->U->value[j] * t;
}
}
#endif
#ifdef USE_MPI
LIS_MATVEC_REDUCE;
#endif
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
}
|
ops.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#pragma once
#ifndef OPS_H_
#define OPS_H_
#include <op_boilerplate.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <vector>
#include <Environment.h>
#include <loops/summarystatsreduce.h>
#define MIN 1e-12
#define MAX_FLOAT 1e37
#define MIN_FLOAT 1e-37
#define MAX_INT 2147483647
#define MIN_CUTFOFF -3.79297773665f
#define FLOAT_MIN_NORMAL 1.17549435e-38
#define EPS 1e-5
#define AFFINITY close
#define DOUBLE_PI_T T(2.0 * 3.14159265358979323846)
#define DOUBLE_PI_X X(2.0 * 3.14159265358979323846)
#define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#ifdef __CUDACC__
#include <helpers/sharedmem.h>
#define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer,Z *result, Nd4jLong *resultShapeBuffer,Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#else
// hacky fix for isnan/isinf being out of scope
//#ifdef IOS
//#define isinf(x) 0 // this isn't right. But std::isinf fails
//#define isnan(x) 0
//#else
//#define isnan std::isnan
//#define isinf std::isinf
//#endif
#define no_op_exec_special_cuda
#define no_op_exec_special_accumulation_cuda
#define no_op_exec_special_accumulation_same_cuda
#define no_op_exec_special_accumulation_long_cuda
#define no_op_exec_special_any_cuda
#define no_op_exec_special_bool_cuda
#define no_op_exec_special_same_cuda
#endif
#define SELU_ALPHA 1.6732632423543772848170429916717
#define SELU_LAMBDA 1.0507009873554804934193349852946
#ifdef _OPENMP
#pragma omp declare reduction(maxT : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minT : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)
#endif
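// Usage sketch for the reductions above (illustrative only; `buffer` and `n`
// are hypothetical):
//   float m = -MAX_FLOAT;
//   #pragma omp parallel for reduction(maxT : m)
//   for (Nd4jLong i = 0; i < n; i++)
//       m = nd4j::math::nd4j_max(m, buffer[i]);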
namespace functions {
namespace indexreduce {
template <typename T>
struct IndexValue {
T value;
Nd4jLong index;
_CUDA_HD IndexValue() = default;
_CUDA_HD IndexValue(const T val, const Nd4jLong ind): value(val), index(ind) {}
};
}
namespace summarystats {
template <typename T>
class SummaryStatsData;
}
}
namespace simdOps {
template <typename X, typename Y, typename Z>
class Add {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 + d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 + d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 + params[0]);
}
op_def static X startingValue() {
return static_cast<X>(0.f);
}
};
template <typename X, typename Y>
class NewAdd {
public:
op_def static X op(X d1, Y d2, X *params) {
return d1 + d2;
}
};
template <typename X, typename Y, typename Z>
class Subtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 - d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 - d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 - params[0]);
}
};
template <typename X, typename Y, typename Z>
class SquaredSubtract {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - d2), 2.f);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - d2), 2.f);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_pow<Z, float, Z>(static_cast<Z>(d1 - params[0]), 2.f);
}
};
template <typename X, typename Y, typename Z>
class ReverseSubtract {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 - d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 - d1);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] - d1);
}
};
template <typename X, typename Y, typename Z>
class LogPoisonLossFull {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)));
}
op_def static Z op(X z) {
auto zz = static_cast<Z>(z);
return (zz * nd4j::math::nd4j_log<Y, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz));
}
// op for MetaOps
op_def static X op(X z, Y *params) {
return (nd4j::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<X, Z>(z) - z + static_cast<X>(0.5f) * nd4j::math::nd4j_log<X, Z>(DOUBLE_PI_X * z)));
}
};
template <typename X, typename Y, typename Z>
class LogPoisonLoss {
public:
op_def static Z op(X z, Y c) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z, Y c, Z *params) {
auto zz = static_cast<Z>(z);
auto zc = static_cast<Z>(c);
return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc);
}
op_def static Z op(X z) {
return static_cast<Z>(z);
}
// op for MetaOps
op_def static Z op(X z, Y *params) {
return (nd4j::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0]));
}
};
template <typename X, typename Y, typename Z>
class Multiply {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 * d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 * d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 * params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1.f);
}
};
template <typename X, typename Y, typename Z>
class Divide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1 / params[0]);
}
op_def static X startingValue() {
return static_cast<X>(1);
}
};
template <typename X, typename Y, typename Z>
class SafeDivide {
public:
op_def static Z op(X d1, Y d2) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
if(d2 == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
if(params[0] == static_cast<Y>(0))
return static_cast<Z>(0);
return static_cast<Z>(d1 / params[0]);
}
};
template <typename X, typename Y, typename Z>
class FloorDiv {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
}
op_def static Z op(X d1) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1));
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / params[0]));
}
};
template <typename X, typename Y, typename Z>
class TruncateDiv {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 / i2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 / i2);
}
};
template <typename X, typename Y, typename Z>
class TruncateMod {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<Z>(i1 % i2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<Z>(i1 % i2);
}
};
template<typename X, typename Y, typename Z>
class Remainder {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_remainder<X, Y, Z>(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class FMod {
public:
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return nd4j::math::nd4j_fmod<X, Y, Z>(d1, params[0]);
}
};
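// FloorMod is a floored modulo whose result takes the sign of the divisor
// (Python-style %), built from fmod by adding d2 back when the operand signs
// differ.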
template <typename X, typename Y, typename Z>
class FloorMod {
public:
op_def static Z op(X d1, Y d2) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2);
return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class ReverseDivide {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2 / d1);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(params[0] / d1);
}
};
template <typename X, typename Y, typename Z>
class CopyPws {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X>
class Copy {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class Copy2 {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Y, typename Z>
class Axpy {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2 + d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
auto alpha = params[0];
return alpha * static_cast<Z>(d1) + static_cast<Z>(d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
class Assign {
public:
no_op_exec_special_any
no_op_exec_special_any_cuda
op_def static Z op(X d1, X *params) {
return static_cast<Z>(d1);
}
};
template <typename X, typename Z>
class And {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Z>
class Or {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return d1 != comp || d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return b1 || b2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, X *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Z>
class Xor {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return d2 + d1;
}
op_def static Z op(X d1, X d2, X *params) {
if (params != nullptr) {
auto comp = params[0];
return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0);
} else {
auto b1 = static_cast<bool>(d1);
auto b2 = static_cast<bool>(d2);
return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0);
}
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Z>
class Not {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
op_def static Z op(X d1, X d2) {
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return d1 != d2 ? static_cast<Z>(1) : static_cast<Z>(0);
}
// this transform op should run only on boolean input
op_def static Z op(X d1, X *params) {
auto b1 = static_cast<bool>(d1);
return !b1;
}
};
template <typename X, typename Y, typename Z>
class LogicalNot {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(!(static_cast<int>(d1) && static_cast<int>(d2)));
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(!(static_cast<int>(d1) && static_cast<int>(d2)));
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalXor {
public:
op_def static Z op(X d1, Y d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return (i1 | i2) &~ (i1 & i2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalAnd {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) & static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class LogicalOr {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) | static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return static_cast<Z>(119);
}
};
template <typename X, typename Y, typename Z>
class Mod {
public:
/*
// just an optional note, feel free to remove later
op_def static half op(half d1, half d2, half *params) {
return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr));
}
*/
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d1) % static_cast<int>(d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X, typename Y, typename Z>
class ReverseMod {
public:
op_def static Z op(X d1, Y d2) {
return static_cast<int>(d2) % static_cast<int>(d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOp
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
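/*
 * Editorial note: Mod and ReverseMod cast through int and therefore inherit
 * C++ truncated-division semantics, where the result takes the sign of the
 * dividend: Mod::op(-7, 3) yields -1. The fmod-based floor-mod op earlier in
 * this file adjusts the fmod result when the operand signs differ, and would
 * yield 2 for the same inputs.
 */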
/**
* Whether two elements in an array
* are epsilon-equal
*/
template <typename X, typename Z>
class Epsilon {
public:
op_def static Z op(X d1, X d2) {
X diff = d1 - d2;
X absDiff = nd4j::math::nd4j_abs<X>(diff);
if (absDiff <= static_cast<X>(MIN))
return static_cast<Z>(1);
return static_cast<Z>(0);
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class EqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 == d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class NotEqualTo {
public:
op_def static Z op(X d1, X d2) {
return d1 != d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 >= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class GreaterThan {
public:
op_def static Z op(X d1, X d2) {
return d1 > d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
// FIXME: this signature clashes with MetaOp stuff
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThan {
public:
op_def static Z op(X d1, X d2) {
return d1 < d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X, typename Z>
class LessThanOrEqual {
public:
op_def static Z op(X d1, X d2) {
return d1 <= d2;
}
op_def static Z op(X d1, X d2, X *params) {
return op(d1, d2);
}
op_def static Z op(X d1, X *params) {
return d1;
}
};
template <typename X>
class Abs {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_abs<X>(d1);
}
};
template <typename X>
class Ceiling {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_ceil<X,X>(d1);
}
};
template <typename X, typename Z>
class Cosine {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_cos<X,Z>(d1);
}
};
template <typename X, typename Z>
class Exp {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_exp<X, Z>(d1);
}
};
template <typename X>
class HardTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return ((d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f)) ? static_cast<X>(1.f) : static_cast<X>(0.f));
}
};
template <typename X, typename Z>
class HardTanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
if (d1 < static_cast<X>(-1))
return static_cast<Z>(-1);
else if (d1 > static_cast<X>(1))
return static_cast<Z>(1);
else
return d1;
}
};
template <typename X>
class Floor {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_floor<X,X>(d1);
}
};
template <typename X, typename Z>
class Log {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_log<X, Z>(d1);
}
};
template <typename X, typename Z>
class Log1p {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_log<X, Z>(static_cast<X>(1) + d1);
}
};
template <typename X, typename Y, typename Z>
class LogX {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_log<X, Z>(d1) / nd4j::math::nd4j_log<Y, Z>(d2) ;
}
};
template <typename X>
class StabilizeFP16 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 <= static_cast<X>(0))
return static_cast<X>(nd4j::DataTypeUtils::min<float16>());
else return d1;
}
};
template <typename X, typename Z>
class StabilizeX {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
if (d1 <= static_cast<X>(0))
return nd4j::DataTypeUtils::min<Z>();
else return d1;
}
};
template <typename X>
class SpecialDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1.f) - d1);
}
};
template <typename X>
class Neg {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return -d1;
}
};
template <typename X, typename Z>
class Erf {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_erf<X,Z>(d1);
}
};
template <typename X, typename Z>
class Erfc {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_erfc<X,Z>(d1);
}
};
template <typename X>
class Reciprocal {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op_def static T op(T d1) {
// return (T(1.0f) / d1);
// }
// op for MetaOps
op_def static X op(X d1, X *params) {
return (static_cast<X>(1) / d1);
}
};
template <typename X, typename Z>
class Sqr {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
op_def static Z op(X d1) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2));
}
};
template <typename X, typename Y, typename Z>
class RelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_re<X>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class BinaryRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
X threshold = params[0];
return nd4j::math::nd4j_re<X>(d1, d2) > threshold ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class BinaryMinimumAbsoluteRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, X *params) {
X d2 = params[0];
X thresholdRelative = params[1];
X thresholdAbsolute = params[2];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1, Y d2, Z *params) {
X thresholdRelative = params[0];
X thresholdAbsolute = params[1];
return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0);
}
op_def static Z op(X d1) {
return static_cast<Z>(0);
}
};
template <typename X, typename Y, typename Z>
class Pow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, params[0]);
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2);
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X, typename Y, typename Z>
class PowDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return params[0] * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(params[0]) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f));
}
op_def static Z op(X d1) {
return d1;
}
};
template <typename X>
class Round {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_round<X,X>(d1);
}
};
template <typename X, typename Z>
class IsNan {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
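/*
 * Editorial note: IsNan doubles as a transform and a reduction. As a
 * transform, op() maps each element to 1/0; as a reduction, update()/merge()
 * sum those flags and postProcess() returns the sum unchanged, so the reduced
 * value is the count of NaN elements. IsInf, IsInfOrNan and IsFinite below
 * follow the same pattern. A serial reference sketch (hypothetical names):
 *
 * float acc = simdOps::IsNan<float, float>::startingValue(x);
 * for (Nd4jLong i = 0; i < n; i++)
 * acc = simdOps::IsNan<float, float>::update(acc, simdOps::IsNan<float, float>::op(x[i], nullptr), nullptr);
 * float nanCount = simdOps::IsNan<float, float>::postProcess(acc, n, nullptr);
 */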
template <typename X, typename Z>
class Expm1 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_exp<X, Z>(d1) - static_cast<Z>(1);
}
};
template <typename X, typename Z>
class IsPositive {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return d1 > (X)0.f;
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInf {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isinf<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsInfOrNan{
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class IsFinite {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static Z op(X d1, X *params) {
return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X>
class ClipByValue {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
if (d1 > params[1])
return params[1];
if (d1 < params[0])
return params[0];
return d1;
}
};
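/*
 * Editorial note: ClipByValue expects params = {min, max}, in that order
 * (params[0] is the lower bound, params[1] the upper). Sketch:
 *
 * float bounds[] = {-1.0f, 1.0f};
 * float y = simdOps::ClipByValue<float>::op(3.5f, bounds); // y == 1.0f
 */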
template <typename X, typename Y, typename Z>
class LstmClip {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
// clips the input to the symmetric range [-d2, d2]
X _v = (X) d2;
if (d1 > _v)
return _v;
else if (d1 < -_v)
return -_v;
else return d1;
}
};
template <typename X, typename Z>
class Swish {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return static_cast<Z>(d1) * nd4j::math::nd4j_sigmoid<X,Z>(d1);
}
};
template <typename X>
class SwishDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
return (ex * (d1 + ex + static_cast<X>(1.f))) / nd4j::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f));
}
};
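/*
 * Editorial note: the SwishDerivative expression can be checked against the
 * definition swish(x) = x * sigmoid(x). With e = exp(x) and sigmoid(x) =
 * e / (1 + e):
 *
 * swish'(x) = sigmoid(x) + x * sigmoid(x) * (1 - sigmoid(x))
 * = e/(1+e) + x*e/(1+e)^2
 * = e * (x + e + 1) / (e + 1)^2
 *
 * which is exactly the quotient computed above.
 */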
template <typename X, typename Z>
class LogSigmoid {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_log<Z, Z>(nd4j::math::nd4j_sigmoid<X, Z>(d1));
}
};
template <typename X>
class LogSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
return static_cast<X>(1.f) / (ex + static_cast<X>(1.f));
}
};
template <typename X, typename Z>
class Sigmoid {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_sigmoid<X, Z>(d1);
}
};
template <typename X>
class SigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_sigmoidderivative<X, X>(d1);
}
};
template <typename X, typename Z>
class HardSigmoid {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_min<X>(static_cast<X>(1), nd4j::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
}
};
template <typename X>
class HardSigmoidDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
}
};
/**
* Scale to be between a min and max
*/
template <typename X, typename Z>
class SetRange {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
auto min = params[0];
auto max = params[1];
if (static_cast<Z>(d1) >= min && static_cast<Z>(d1) <= max)
return static_cast<Z>(d1);
if (min == static_cast<Z>(0) && max == static_cast<Z>(1)) {
auto val = static_cast<Z>(1) / (static_cast<Z>(1) + nd4j::math::nd4j_exp<X, Z>(-d1));
return (nd4j::math::nd4j_floor<Z,Z>(val * (max - min)) + min);
}
return (nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1) * (max - min)) + min);
}
};
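/*
 * Editorial note: SetRange reads params = {min, max}. Values already inside
 * [min, max] pass through unchanged; for the special case of a [0, 1] target
 * range the input is first squashed with a sigmoid before the floor-and-shift
 * rescaling that handles the general case. Sketch (hypothetical values):
 *
 * float range[] = {2.0f, 5.0f};
 * float y = simdOps::SetRange<float, float>::op(0.5f, range); // floor(0.5 * 3) + 2 == 3
 */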
template <typename X, typename Z>
class Sin {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_sin<X,Z>(d1);
}
};
template <typename X>
class Square {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1;
}
};
template <typename X, typename Z>
class Sqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X, typename Z>
class RSqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return static_cast<Z>(1) / nd4j::math::nd4j_sqrt<X, Z>(d1);
}
};
template <typename X, typename Z>
class Rint {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_rint<X,Z>(d1);
}
};
template <typename X, typename Z>
class SoftPlus {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::softplus<X, Z>(d1);
}
};
template <typename X>
class Sign {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return (d1 > static_cast<X>(0)) - (d1 < static_cast<X>(0));
}
};
template <typename X>
class TimesOneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * (static_cast<X>(1) - d1);
}
};
template <typename X, typename Z>
class RationalTanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
// keep 2/3 as runtime variable, to match precision
auto dis = (static_cast<Z>(2) / static_cast<Z>(3)) * static_cast<Z>(d1);
auto tanh = nd4j::math::nd4j_sgn<Z,Z>(dis) * (static_cast<Z>(1) - (static_cast<Z>(1) / (static_cast<Z>(1) + static_cast<Z>(nd4j::math::nd4j_abs<Z>(dis)) + nd4j::math::nd4j_pow<Z, Z, Z>(dis, static_cast<Z>(2)) + static_cast<Z>(1.41645f) * nd4j::math::nd4j_pow<Z, Z, Z>(dis, static_cast<Z>(4)) )));
return static_cast<Z>(1.7159f) * tanh;
}
};
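/*
 * Editorial note: RationalTanh appears to be a rational-polynomial
 * approximation of 1.7159 * tanh(2x/3), the scaled tanh recommended in
 * LeCun's "Efficient BackProp". With u = 2x/3, the denominator
 * 1 + |u| + u^2 + 1.41645*u^4 stands in for the true tanh, and
 * RationalTanhDerivative below differentiates the same approximation via
 * the quotient rule.
 */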
template <typename X>
class RationalTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;
auto a = static_cast<X>(1.f) + nd4j::math::nd4j_abs<X>(dis) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));
auto tDeriv = (static_cast<X>(1.f) + nd4j::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) * static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);
return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
}
};
template <typename X, typename Z>
class Tanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_tanh<X, Z>(d1);
}
};
template <typename X, typename Z>
class RectifiedTanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_max<Z>(static_cast<Z>(0), nd4j::math::nd4j_tanh<X,Z>(d1));
}
};
template <typename X>
class RectifiedTanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? nd4j::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
}
};
template <typename X, typename Z>
class ATanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_atanh<X,Z>(d1);
}
};
template <typename X>
class TanhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_tanhderivative<X,X>(d1);
}
};
template <typename X>
class Cube {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 * d1 * d1;
}
};
template <typename X>
class CubeDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(3) * d1 * d1;
}
};
template <typename X, typename Z>
class ACos {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_acos<X, Z>(d1);
}
};
template <typename X, typename Z>
class ASinh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_asinh<X, Z>(d1);
}
};
template <typename X>
class ASinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
}
};
template <typename X, typename Z>
class ACosh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_acosh<X, Z>(d1);
}
};
template <typename X>
class ACoshDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * nd4j::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
}
};
template <typename X>
class Ones {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.0f);
}
};
template <typename X, typename Z>
class SoftSign {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_softsign<X,Z>(d1);
}
};
template <typename X>
class SoftSignDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_softsignderivative<X,X>(d1);
}
};
template <typename X, typename Z>
class MatchConditionBool {
public:
no_op_exec_special_bool
no_op_exec_special_bool_cuda
// this op returns 1.0 if the condition is met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps;
case 2: // less_than
return d1 < compare;
case 3: // greater_than
return d1 > compare;
case 4: // less_or_equals_than
return d1 <= compare;
case 5: // greater_or_equals_than
return d1 >= compare;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1);
case 9: // is nan
return nd4j::math::nd4j_isnan(d1);
case 10:
return d1 == compare;
case 11:
return d1 != compare;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1);
default:
printf("Undefined match condition: [%i]\n", mode);
}
return d1;
}
};
template <typename X, typename Z>
class MatchCondition {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return old + opOutput;
}
// this op returns 1.0 if the condition is met, 0.0 otherwise
op_def static Z op(X d1, X *extraParams) {
X compare = extraParams[0];
X eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
//printf("value: %f; comp: %f; eps: %f; mode: %i;\n", (float) d1, (float) compare, (float) eps, mode);
switch (mode) {
case 0: // equals
return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? 1 : 0;
case 1: // not equals
return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? 1 : 0;
case 2: // less_than
return d1 < compare ? 1 : 0;
case 3: // greater_than
return d1 > compare ? 1 : 0;
case 4: // less_or_equals_than
return d1 <= compare ? 1 : 0;
case 5: // greater_or_equals_than
return d1 >= compare ? 1 : 0;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<X>(d1) < compare ? 1 : 0;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<X>(d1) > compare ? 1 : 0;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1) ? 1 : 0;
case 9: // is nan
return nd4j::math::nd4j_isnan(d1) ? 1 : 0;
case 10:
return (d1 == compare) ? 1 : 0;
case 11:
return (d1 != compare) ? 1 : 0;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) >= compare ? 1 : 0;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<X>(d1) <= compare ? 1 : 0;
case 14:
// isFinite
return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1)) ? 1 : 0;
case 15:
// isInfinite
return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1) ? 1 : 0;
default:
printf("Undefined match condition: [%i]\n", mode);
}
return d1;
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
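/*
 * Editorial note: both MatchConditionBool and MatchCondition read
 * extraParams = {compare, eps, mode}, where mode selects the comparison as
 * enumerated in the switch above. For example, counting strictly positive
 * elements uses mode 3 (greater_than):
 *
 * float extras[] = {0.0f, 1e-5f, 3.0f};
 * // accumulate MatchCondition<float, float>::op(x[i], extras) over i;
 * // postProcess() then returns the match count unchanged.
 */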
template <typename X, typename Z>
class ELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_elu<X,Z>(d1);
}
};
template <typename X>
class ELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_eluderivative<X,X>(d1);
}
};
template <typename X, typename Y, typename Z>
class RELU {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto xt = static_cast<Z>(d1);
auto xf = static_cast<Z>(d2);
return xt < xf ? xf : xt;
}
};
template <typename X, typename Y, typename Z>
class SXELogitsSmoother {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return static_cast<Z>(d1 * (static_cast<X>(1.f) - static_cast<X>(d2)) + static_cast<X>(0.5f) * static_cast<X>(d2));
}
};
template <typename X, typename Y, typename Z>
class RELU6 {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
auto relu = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
return relu < static_cast<Z>(6) ? relu : static_cast<Z>(6);
}
};
template <typename X, typename Y, typename Z>
class LeakyRELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_leakyrelu<X,Z>(d1, d2);
}
};
template <typename X, typename Z>
class SELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return d1 > static_cast<X>(0.0f) ? static_cast<Z>(SELU_LAMBDA) * static_cast<Z>(d1) : static_cast<Z>(SELU_LAMBDA) * (static_cast<Z>(SELU_ALPHA) * nd4j::math::nd4j_exp<X, Z>(d1) - static_cast<Z>(SELU_ALPHA));
}
};
template <typename X>
class SELUDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1 > static_cast<X>(0.f) ? static_cast<X>(SELU_LAMBDA) : static_cast<X>(SELU_ALPHA) * static_cast<X>(SELU_LAMBDA) * nd4j::math::nd4j_exp<X, X>(d1);
}
};
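/*
 * Editorial note: SELU_LAMBDA and SELU_ALPHA are assumed to be defined
 * elsewhere in this codebase with the standard self-normalizing-network
 * constants (lambda ~ 1.0507, alpha ~ 1.6733, from Klambauer et al., 2017).
 * SELUDerivative is then the piecewise derivative of SELU: lambda for x > 0,
 * and lambda * alpha * exp(x) otherwise.
 */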
template <typename X, typename Y, typename Z>
class LeakyRELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
if (d1 >= static_cast<X>(0))
return static_cast<Z>(1);
else
return static_cast<Z>(d2);
}
};
template <typename X, typename Z>
class ASin {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_asin<X,Z>(d1);
}
};
template <typename X, typename Z>
class Sinh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_sinh<X,Z>(d1);
}
};
template <typename X>
class SinhDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return nd4j::math::nd4j_cosh<X, X>(d1);
}
};
template <typename X, typename Z>
class Cosh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_cosh<X,Z>(d1);
}
};
template <typename X, typename Z>
class Tan {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_tan<X,Z>(d1);
}
};
template <typename X>
class TanDerivative {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1.f) / nd4j::math::nd4j_pow<X, X, X>(nd4j::math::nd4j_cos<X, X>(d1), static_cast<X>(2.0f));
}
};
template <typename X, typename Z>
class ATan {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Z *params) {
return nd4j::math::nd4j_atan<X, Z>(d1);
}
};
template <typename X, typename Y, typename Z>
class Atan2 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_atan2<X, Z>(d2, d1);
}
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
// op for MetaOps
op_def static Z op(X d1, Y *params) {
return op(d1, params[0]);
}
};
template <typename X>
class Identity {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return d1;
}
};
template <typename X>
class Stabilize {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
X k = params[0];
if (d1 * k > static_cast<X>(- MIN_CUTFOFF))
return static_cast<X>(- MIN_CUTFOFF) / k;
else if (d1 * k < static_cast<X>(MIN_CUTFOFF))
return static_cast<X>(MIN_CUTFOFF) / k;
return d1;
}
};
template <typename X, typename Y, typename Z>
class Step {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return (d1 > static_cast<X>(d2) ? static_cast<Z>(1) : static_cast<Z>(0));
}
};
template <typename X>
class OneMinus {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
op_def static X op(X d1, X *params) {
return static_cast<X>(1) - d1;
}
};
template <typename X>
class Sum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class ShannonEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2)) * nd4j::math::nd4j_log<X, Z>(nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2.0f)));
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return -reduction;
}
};
template <typename X, typename Z>
class LogEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
//entropy is -sum(p(x) * log(p(x))); log entropy is log of this
return nd4j::math::nd4j_log<X, Z>(-reduction);
}
};
template <typename X, typename Z>
class Entropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(-reduction); //entropy is -sum(p(x) * log(p(x)))
}
};
template <typename X>
class ASum {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X, typename Z>
class CountNonZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0.0f) ? static_cast<Z>(0.0f) : static_cast<Z>(1.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class CountZero {
public:
no_op_exec_special_accumulation_long
no_op_exec_special_accumulation_cuda
op_def static Z startingValue(const X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z merge(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(Z old, Z opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1 == static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return static_cast<Z>(reduction);
}
};
template <typename X>
class Prod {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Any {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
};
template <typename X, typename Z>
class All {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(1);
}
op_def static Z merge(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z update(X old, X opOutput, X *extraParams) {
return opOutput * old;
}
op_def static Z op(X d1, X *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
}
};
template <typename X, typename Z>
class Mean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(reduction) / static_cast<Z>(n);
}
};
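/*
 * Editorial note: Mean shows the generic accumulation contract shared by the
 * reduce ops in this section: startingValue() seeds the accumulator, op()
 * maps one element, update() folds it in, merge() combines partial
 * accumulators (e.g. across threads), and postProcess() finalizes. A minimal
 * serial sketch (hypothetical buffer name x and length n):
 *
 * float acc = simdOps::Mean<float, float>::startingValue(x);
 * for (Nd4jLong i = 0; i < n; i++)
 * acc = simdOps::Mean<float, float>::update(acc, simdOps::Mean<float, float>::op(x[i], nullptr), nullptr);
 * float mean = simdOps::Mean<float, float>::postProcess(acc, n, nullptr);
 */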
template <typename X, typename Z>
class AMean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction) / static_cast<X>(n);
}
};
template <typename X>
class Max {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Y, typename Z>
class AMaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
return nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(z1), nd4j::math::nd4j_abs<Z>(z2));
}
};
template <typename X, typename Y, typename Z>
class AMinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return op(d1, d2);
}
op_def static Z op(X d1, Y d2) {
auto z1 = static_cast<Z>(d1);
auto z2 = static_cast<Z>(d2);
return nd4j::math::nd4j_min<Z>(nd4j::math::nd4j_abs<Z>(z1), nd4j::math::nd4j_abs<Z>(z2));
}
};
template <typename X, typename Y, typename Z>
class MaxPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
template <typename X, typename Y, typename Z>
class MinPairwise {
public:
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
op_def static Z op(X d1, Y d2) {
return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
}
};
template <typename X>
class AMax {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_abs<X>(d1) > nd4j::math::nd4j_abs<X>(d2) ? d1 : d2;
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X>
class AMin {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return nd4j::math::nd4j_abs<X>(d1);
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return nd4j::math::nd4j_abs<X>(reduction);
}
};
template <typename X>
class Min {
public:
no_op_exec_special_accumulation_same
no_op_exec_special_accumulation_same_cuda
op_def static X startingValue(const X *input) {
return input[0];
}
op_def static X merge(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(old, opOutput);
}
op_def static X update(X old, X opOutput, X *extraParams) {
return nd4j::math::nd4j_min<X>(opOutput, old);
}
op_def static X op(X d1, X d2, X *params) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
op_def static X op(X d1, X d2) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static X op(X d1, X *extraParams) {
return d1;
}
op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
return reduction;
}
};
template <typename X, typename Z>
class Norm1 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1));
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(reduction);
}
};
template <typename X, typename Z>
class Norm2 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_sqrt<X, Z>(reduction);
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
};
template <typename X, typename Z>
class SquaredNorm {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return static_cast<Z>(d1 * d1);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(reduction);
}
};
template <typename X, typename Z>
class NormFrobenius {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
X v = nd4j::math::nd4j_abs<X>(d1);
return static_cast<Z>(v * v);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_sqrt<X, Z>(reduction);
}
};
template <typename X, typename Z>
class NormP {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z op(X d1, Z *extraParams) {
return nd4j::math::nd4j_pow<X, Z, Z>(nd4j::math::nd4j_abs<X>(d1), extraParams[0]);
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return nd4j::math::nd4j_pow<X, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]);
}
};
template <typename X, typename Z>
class NormMax {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return opOutput + old;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old),
nd4j::math::nd4j_abs<X>(opOutput));
}
op_def static Z op(X d1, Z *extraParams) {
return d1;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
return static_cast<Z>(nd4j::math::nd4j_abs<X>(reduction)); // max of a value with itself is the value
}
};
template <typename X, typename Z>
class Variance {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static X op(X d1, Z *extraParams) {
X mean = static_cast<X>(extraParams[0]);
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
// T bias = extraParams[1];
// return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
return static_cast<Z>(reduction) / static_cast<Z>(n - 1);
}
};
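/*
 * Editorial note: Variance here is the second pass of a two-pass algorithm.
 * The mean is computed beforehand and supplied in extraParams[0]; op() then
 * accumulates squared deviations, and postProcess() divides by n - 1
 * (Bessel's correction), i.e. it yields the sample variance rather than the
 * population variance. StandardDeviation below reuses this postProcess and
 * takes the square root.
 */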
/**
* Standard deviation of a buffer
*/
template <typename X, typename Z>
class StandardDeviation {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static X startingValue(const X *input) {
return static_cast<X>(0.0f);
}
op_def static Z merge(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z update(X old, X opOutput, Z *extraParams) {
return old + opOutput;
}
op_def static Z op(X d1, Z *extraParams) {
X mean = extraParams[0];
X ret = d1 - mean;
return ret * ret;
}
op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
Z ret = Variance<X,Z>::postProcess(reduction, n, extraParams);
Z sqrtRet = nd4j::math::nd4j_sqrt<X, Z>(ret);
return sqrtRet;
}
};
template <typename X, typename Y>
class CosineSimilarity {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(d1 * d1);
extraParams[1] += static_cast<Y>(d2 * d2);
return static_cast<Y>(d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2));
return static_cast<Y>(d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
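/*
 * Editorial note: CosineSimilarity accumulates the two squared norms in
 * extraParams while op() returns the elementwise product, so after the pass
 * postProcess() evaluates sum(x*y) / (sqrt(sum(x*x)) * sqrt(sum(y*y))).
 * CosineDistance further below returns 1 minus this quantity.
 */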
template <typename X, typename Y>
class JaccardDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(const X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
// num / denom
return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]);
}
op_def static Y num(X d1, X d2) {
return nd4j::math::nd4j_min<X>(d1, d2);
}
op_def static Y denom(X d1, X d2) {
return nd4j::math::nd4j_max<X>(d1, d2);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(num(d1, d2));
extraParams[1] += static_cast<Y>(denom(d1, d2));
return static_cast<Y>(0.0f);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
return static_cast<Y>(0.0f);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
template <typename X, typename Y>
class SimpleHammingDistance {
public:
static const int extraParamsLen = 0;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return static_cast<Y>(reduction / n);
}
op_def static Y op(X d1, X d2, Y *extraParams) {
return (d1 == d2) ? static_cast<Y>(0.0f) : static_cast<Y>(1.0f);
}
op_def static void aggregateExtraParams(X *extraParamsTotal, X *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParams) {
return op(d1, d2, extraParams);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
template <typename X, typename Y>
class CosineDistance {
public:
static const int extraParamsLen = 2;
op_def static X *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParams) {
//delete[] extraParams;
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) {
return (static_cast<Y>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1])));
}
op_def static Y op(X d1, X d2, Y *extraParams) {
extraParams[0] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1));
extraParams[1] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2));
return (d1 * d2);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], nd4j::math::nd4j_abs<Y>(d1) * nd4j::math::nd4j_abs<Y>(d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], nd4j::math::nd4j_abs<Y>(d2) * nd4j::math::nd4j_abs<Y>(d2));
return (d1 * d2);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParams) {
return old + opOutput;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParams) {
return update(old, opOutput, extraParams);
}
};
/**
* Dot product between 2 arrays
*/
template <typename X, typename Y>
class Dot {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
//delete[] * extraParamsRef;
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return reduction;
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
return static_cast<Y>(d1 * d2);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
/**
* Op to check equality within arrays
*/
template <typename X, typename Z>
class EqualsWithEps {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Z startingValue(X *input) {
return static_cast<Z>(0.0f);
}
op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) {
return reduction;
}
op_def static Z op(X d1, X d2, Z *extraParamsRef) {
Z eps = nd4j::math::nd4j_abs<Z>(extraParamsRef[2]);
Z diff = static_cast<Z>(nd4j::math::nd4j_abs<X>(d1 - d2));
// works well except in the range of very large numbers
if (diff <= eps)
return static_cast<Z>(0.f);
// Knuth approach
// works well except in the range of very small numbers
if (diff <= nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(static_cast<Z>(d1)), nd4j::math::nd4j_abs<Z>(static_cast<Z>(d2))) * eps)
return static_cast<Z>(0.f);
return static_cast<Z>(1.f);
}
#ifdef __CUDACC__
__device__
static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) {
return opOutput + old;
}
op_def static Z merge(Z old, Z opOutput, Z *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {}
};
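/*
Illustrative sketch (not part of the library) of why the two checks above are
combined. With eps = 1e-5, the absolute test |d1 - d2| <= eps accepts
1.0 vs 1.000001 but rejects 1e9 vs 1e9 + 1, even though those agree to nine
significant digits. The Knuth-style relative test
|d1 - d2| <= max(|d1|, |d2|) * eps
accepts the large pair (diff 1 <= 1e9 * 1e-5 = 1e4), while the absolute test
covers values near zero, where the relative bound collapses.
*/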
template <typename X, typename Y>
class EuclideanDistance {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return nd4j::math::nd4j_sqrt<Y, Y>(reduction);
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
X ret = d1 - d2;
return static_cast<Y>(ret * ret);
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return opOutput + old;
}
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {}
};
template <typename X, typename Y>
class ManhattanDistance {
public:
static const int extraParamsLen = 0;
op_def static X * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(X *extraParamsRef) {
//no-op
}
op_def static Y startingValue(X *input) {
return static_cast<Y>(0.0f);
}
op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) {
return reduction;
}
op_def static Y op(X d1, X d2, Y *extraParamsRef) {
return static_cast<Y>(nd4j::math::nd4j_abs<X>(d1 - d2));
}
op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) {
return old + opOutput;
}
op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
};
template <typename X>
class IndexAbsoluteMax {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
// take the absolute value of the payload; nd4j_abs operates on X, not on the IndexValue wrapper
val.value = nd4j::math::nd4j_abs<X>(val.value);
return val;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
old.value = nd4j::math::nd4j_abs<X>(old.value);
if (opOutput.value > old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (nd4j::math::nd4j_abs<X>(f1.value) > nd4j::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline X startingValue(X *input) {
return 0;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class FirstIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
//printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);
if (res == static_cast<X>(0))
return old;
if (old.index < 0)
return opOutput;
if (old.index > opOutput.index)
return opOutput;
return old;
}
static _CUDA_HD inline X startingValue(X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index > f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X>
class LastIndex {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
#ifdef __CUDACC__
if (opOutput.index < 0)
return old;
#endif
auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams);
if (res == static_cast<X>(0))
return old;
if (old.index < 0)
return opOutput;
if (old.index < opOutput.index)
return opOutput;
return old;
}
static _CUDA_HD inline X startingValue(X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.index < f2.index)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
};
template <typename X>
class IndexMax {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value > old.value) {
return opOutput;
}
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value > f2.value)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline X startingValue(X *input) {
return -nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class IndexAbsoluteMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD inline X startingValue(X *input) {
return nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value);
old.value = nd4j::math::nd4j_abs<X>(old.value);
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (nd4j::math::nd4j_abs<X>(f1.value) < nd4j::math::nd4j_abs<X>(f2.value))
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X>
class IndexMin {
public:
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(
functions::indexreduce::IndexValue<X> val, X *extraParams) {
return val;
}
static _CUDA_HD inline X startingValue(X *input) {
return nd4j::DataTypeUtils::max<X>();
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) {
functions::indexreduce::IndexValue<X> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) {
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#endif
return old;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge(
functions::indexreduce::IndexValue<X> f1,
functions::indexreduce::IndexValue<X> f2, X *extraParams) {
if (f1.value < f2.value)
return f2;
return f1;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess(
functions::indexreduce::IndexValue<X> reduction, int n, int xOffset,
X *dx, int incx, X *extraParams, X *result) {
return reduction;
}
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1,
functions::indexreduce::IndexValue<X> d2, X *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class SummaryStatsVariance {
public:
static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
if (biasCorrected) {
Z ret = static_cast<Z>(val.varianceBiasCorrected());
if (ret < static_cast<Z>(0.0f))
return static_cast<Z>(val.variance());
return ret;
}
return static_cast<Z>(val.variance());
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
return d1;
}
};
template <typename X, typename Z>
class SummaryStatsStandardDeviation {
public:
static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) {
if (biasCorrected) {
auto ret = static_cast<Z>(val.varianceBiasCorrected());
if (ret < static_cast<Z>(0.0f))
return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
else
return nd4j::math::nd4j_sqrt<double, Z>(ret);
}
return nd4j::math::nd4j_sqrt<double, Z>(val.variance());
}
static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) {
return d1;
}
};
template <typename X>
class DropOut {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
inline _CUDA_D static X op(X d1, X *params) {
X prob = params[0];
#ifdef __CUDACC__
X length = params[1];
X tid = blockIdx.x * blockDim.x + threadIdx.x; // global thread index (blockIdx, not gridDim, which is constant across blocks)
X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX); // cast before dividing: rand() / RAND_MAX is integer division and is almost always 0
#endif
return rnd >= prob ? static_cast<X>(0.0f) : d1;
}
};
template <typename X, typename Y, typename Z>
class DropOutInverted {
public:
no_op_exec_special
no_op_exec_special_cuda
#ifdef __CUDACC__
__device__
#endif
inline static Z op(X d1, Y d2, Z *params) {
Y prob = d2;
#ifdef __CUDACC__
X length = params[1];
X tid = blockIdx.x * blockDim.x + threadIdx.x; // global thread index (blockIdx, not gridDim, which is constant across blocks)
X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid)));
#else
X rnd = static_cast<X>(rand()) / static_cast<X>(RAND_MAX); // cast before dividing: rand() / RAND_MAX is integer division and is almost always 0
#endif
return rnd >= static_cast<X>(prob) ? static_cast<Z>(0.0f) : static_cast<Z>(d1 / static_cast<X>(prob)); // static_cast: reinterpret_cast between arithmetic types is ill-formed
}
};
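/*
Note on the scaling above (inverted dropout): a unit survives with probability
prob and is scaled by 1/prob, so the expected activation is unchanged:
prob * (d1 / prob) = d1. This is why no rescaling pass is needed at inference
time, unlike the plain DropOut op above.
*/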
template <typename X, typename Y, typename Z>
class ReplaceNans {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static Z op(X d1, Y d2, Z *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<Z>(d2) : static_cast<Z>(d1) ;
}
};
// this op is used for conditional pairwise transforms only
template <typename X, typename Y, typename Z>
class CompareAndReplace {
public:
// op definition for PairWise Transform
op_def static Z op(X d1, Y d2, Z *params) {
auto zd1 = static_cast<Z>(d1);
auto zd2 = static_cast<Z>(d2);
auto compare = params[0];
auto eps = params[2];
int mode = (int) params[3];
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<Z>(zd1 - compare) <= eps)
return zd2;
else
return zd1;
else if (mode == 1) // not equals (within eps)
if (nd4j::math::nd4j_abs<Z>(zd1 - compare) > eps)
return zd2;
else
return zd1;
else if (mode == 2) // less_than
if (zd1 < compare)
return zd2;
else
return zd1;
else if (mode == 3) // greater_than
if (zd1 > compare)
return zd2;
else
return zd1;
else if (mode == 4) // less_or_equals_than
if (zd1 <= compare)
return zd2;
else
return zd1;
else if (mode == 5) // greater_or_equals_than
if (zd1 >= compare)
return zd2;
else
return zd1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<Z>(zd1) < compare)
return zd2;
else
return zd1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<Z>(zd1) > compare)
return zd2;
else
return zd1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(zd1))
return zd2;
else
return zd1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(zd1))
return zd2;
else
return zd1;
else if (mode == 10)
if (zd1 == compare)
return zd2;
else
return zd1;
else if (mode == 11)
if (zd1 != compare)
return zd2;
else
return zd1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<Z>(zd1) >= compare)
return zd2;
else
return zd1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<Z>(zd1) <= compare)
return zd2;
else
return zd1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return zd1;
}
};
template <typename X, typename Y, typename Z>
class CompareAndSet {
public:
// op definition for PairWise Transform
op_def static Z op(X dX, Y dY, Z *params) {
auto d1 = static_cast<Z>(dX);
auto d2 = static_cast<Z>(dY);
auto compare = params[0];
auto eps = params[2];
auto mode = static_cast<int>(params[3]);
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<Z>(d2 - compare) <= eps)
return d2;
else
return d1;
else if (mode == 1) // not equals
if (nd4j::math::nd4j_abs<Z>(d2 - compare) > eps)
return d2;
else
return d1;
else if (mode == 2) // less_than
if (d2 < compare)
return d2;
else
return d1;
else if (mode == 3) // greater_than
if (d2 > compare)
return d2;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d2 <= compare)
return d2;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d2 >= compare)
return d2;
else
return d1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<Z>(d2) < compare)
return d2;
else
return d1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<Z>(d2) > compare)
return d2;
else
return d1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(d2))
return d2;
else
return d1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(d2))
return d2;
else
return d1;
else if (mode == 10)
if (d2 == compare)
return d2;
else
return d1;
else if (mode == 11)
if (d2 != compare)
return d2;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than (note: tests d1, the current value, unlike the d2-based modes above)
if (nd4j::math::nd4j_abs<Z>(d1) >= compare)
return d2;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than (note: also tests d1)
if (nd4j::math::nd4j_abs<Z>(d1) <= compare)
return d2;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
template <typename X>
class CompareAndSetTransform {
public:
no_op_exec_special_same
no_op_exec_special_same_cuda
// op definition for Transform
op_def static X op(X d1, X *params) {
auto compare = params[0];
auto set = params[1];
auto eps = params[2];
// with mode == 0 we do set if d1 equals to compare, and with mode == 1 - we go otherwise
int mode = (int) params[3];
if (mode == 0) // equals
if (nd4j::math::nd4j_abs<X>(d1 - compare) <= eps)
return set;
else
return d1;
//return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1;
else if (mode == 1) // not equals
if (nd4j::math::nd4j_abs<X>(d1 - compare) > eps)
return set;
else
return d1;
//return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1;
else if (mode == 2) // less_than
if (d1 < compare)
return set;
else
return d1;
else if (mode == 3) // greater_than
if (d1 > compare)
return set;
else
return d1;
else if (mode == 4) // less_or_equals_than
if (d1 <= compare)
return set;
else
return d1;
else if (mode == 5) // greater_or_equals_than
if (d1 >= compare)
return set;
else
return d1;
else if (mode == 6) // abs_less_than
if (nd4j::math::nd4j_abs<X>(d1) < compare)
return set;
else
return d1;
else if (mode == 7) // abs_greater_than
if (nd4j::math::nd4j_abs<X>(d1) > compare)
return set;
else
return d1;
else if (mode == 8) // is inf
if (nd4j::math::nd4j_isinf(d1))
return set;
else
return d1;
else if (mode == 9) // is nan
if (nd4j::math::nd4j_isnan(d1))
return set;
else
return d1;
else if (mode == 10)
if (d1 == compare)
return set;
else
return d1;
else if (mode == 11)
if (d1 != compare)
return set;
else
return d1;
else if (mode == 12) // abs_greater_or_equals_than
if (nd4j::math::nd4j_abs<X>(d1) >= compare)
return set;
else
return d1;
else if (mode == 13) // abs_less_or_equals_than
if (nd4j::math::nd4j_abs<X>(d1) <= compare)
return set;
else
return d1;
else
printf("Undefined boolean operation: [%i]\n", mode);
return d1;
}
};
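/*
Illustrative sketch (not part of the library) of the params layout shared by
the three compare ops above. For example, to replace every element equal to 0
(within 1e-6) by 1 via CompareAndSetTransform:
X params[4];
params[0] = 0; // compare value
params[1] = 1; // set value
params[2] = static_cast<X>(1e-6); // eps
params[3] = 0; // mode 0: equals
// then, for each element: out = CompareAndSetTransform<X>::op(d1, params);
Note that for CompareAndReplace and CompareAndSet, params[1] is unused and the
replacement value comes from the second operand d2 instead.
*/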
}
#endif
|
fsm3d_dfsm_openmp_v1.c | #include "openst/eikonal/fsm.h"
#define M_FSM3D_IMP_NAME "DFSM"
const char OPENST_FSM3D_COMPUTEPARTIAL_IMP_NAME[] = M_FSM3D_IMP_NAME;
const size_t OPENST_FSM3D_COMPUTEPARTIAL_IMP_NAME_LENGTH = sizeof(M_FSM3D_IMP_NAME);
int OpenST_FSM3D_ComputePartial_1H(OPENST_FLOAT *U, OPENST_FLOAT *V,
size_t NI, size_t NJ, size_t NK,
OPENST_FLOAT H,
int start_iter, int max_iter, int *converged,
size_t BSIZE_I, size_t BSIZE_J, size_t BSIZE_K,
OPENST_FLOAT EPS){
int total_it, it, notconvergedl, notconvergedt;
int REVI, REVJ, REVK;
size_t levelr, K1, K2, kr, level, I1, I2, ir, jr;
if(start_iter >= max_iter){
return max_iter;
}
total_it = start_iter;
notconvergedl = 0;
#pragma omp parallel default(none) \
shared(total_it, notconvergedl, NI, NJ, NK, \
U, V, H, max_iter, EPS, start_iter) \
private(it, notconvergedt, \
REVI, REVJ, REVK, levelr, K1, K2, kr, level, I1, I2, ir, jr)
{
for(it = start_iter; it < max_iter; ++it){
#pragma omp single nowait
{
++total_it;
notconvergedl = 0;
}
notconvergedt = 0;
OpenST_FSM3D_GetSweepOrder(it, &REVI, &REVJ, &REVK);
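/*
Anti-diagonal (wavefront) decomposition: every node (ir, jr, kr) with
ir + jr + kr == levelr depends only on nodes from previous levels, so the
inner "omp for" below can update all nodes of one level in parallel.
*/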
for(levelr = 0; levelr < NI + NJ + NK - 2; ++levelr){
K1 = (NI + NJ - 2 < levelr) ?
(levelr - NI - NJ + 2) : 0;
K2 = (NK - 1 > levelr) ? levelr : NK - 1;
for(kr = K1; kr <= K2; ++kr){
level = levelr - kr;
I1 = (NJ - 1 < level) ? (level - NJ + 1) : 0;
I2 = (NI - 1 > level) ? level : NI - 1;
#pragma omp for nowait
for(ir = I1; ir <= I2; ++ir){
jr = level - ir;
if(OpenST_FSM3D_NodeUpdate_1H(U, V,
NI, NJ, NK,
H,
REVI, REVJ, REVK,
ir, jr, kr, EPS)){
notconvergedt = 1;
}
}
}
#pragma omp barrier
}
#pragma omp atomic
notconvergedl += notconvergedt;
#pragma omp barrier
#pragma omp flush (notconvergedl)
if(!notconvergedl){
break;
}
#pragma omp barrier
}
}
*converged = (notconvergedl == 0);
return total_it;
}
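/*
Illustrative usage sketch (assumptions: OPENST_FLOAT and the API as declared
above; U is presumably the travel-time grid and V the velocity model, each of
size NI * NJ * NK, with U initialized with the source condition beforehand):
int converged = 0;
int it = OpenST_FSM3D_ComputePartial_1H(U, V, NI, NJ, NK, H,
0, 100, &converged,
BSIZE_I, BSIZE_J, BSIZE_K, EPS);
if (!converged) { ... the iteration cap was hit before convergence ... }
*/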
int OpenST_FSM3D_ComputePartial(OPENST_FLOAT *U, OPENST_FLOAT *V,
size_t NI, size_t NJ, size_t NK,
OPENST_FLOAT HI, OPENST_FLOAT HJ, OPENST_FLOAT HK,
int start_iter, int max_iter, int *converged,
size_t BSIZE_I, size_t BSIZE_J, size_t BSIZE_K,
OPENST_FLOAT EPS){
int total_it, it, notconvergedl, notconvergedt;
int REVI, REVJ, REVK;
size_t levelr, K1, K2, kr, level, I1, I2, ir, jr;
if((HI == HJ) && (HI == HK)){
return OpenST_FSM3D_ComputePartial_1H(U, V,
NI, NJ, NK,
HI,
start_iter, max_iter, converged,
BSIZE_I, BSIZE_J, BSIZE_K, EPS);
}
if(start_iter >= max_iter){
return max_iter;
}
total_it = start_iter;
notconvergedl = 0;
#pragma omp parallel default(none) \
shared(total_it, notconvergedl, NI, NJ, NK, \
U, V, HI, HJ, HK, max_iter, EPS, start_iter) \
private(it, notconvergedt, \
REVI, REVJ, REVK, levelr, K1, K2, kr, level, I1, I2, ir, jr)
{
for(it = start_iter; it < max_iter; ++it){
#pragma omp single nowait
{
++total_it;
notconvergedl = 0;
}
notconvergedt = 0;
OpenST_FSM3D_GetSweepOrder(it, &REVI, &REVJ, &REVK);
for(levelr = 0; levelr < NI + NJ + NK - 2; ++levelr){
K1 = (NI + NJ - 2 < levelr) ?
(levelr - NI - NJ + 2) : 0;
K2 = (NK - 1 > levelr) ? levelr : NK - 1;
for(kr = K1; kr <= K2; ++kr){
level = levelr - kr;
I1 = (NJ - 1 < level) ? (level - NJ + 1) : 0;
I2 = (NI - 1 > level) ? level : NI - 1;
#pragma omp for nowait
for(ir = I1; ir <= I2; ++ir){
jr = level - ir;
if(OpenST_FSM3D_NodeUpdate(U, V,
NI, NJ, NK,
HI, HJ, HK,
REVI, REVJ, REVK,
ir, jr, kr, EPS)){
notconvergedt = 1;
}
}
}
#pragma omp barrier
}
#pragma omp atomic
notconvergedl += notconvergedt;
#pragma omp barrier
#pragma omp flush (notconvergedl)
if(!notconvergedl){
break;
}
#pragma omp barrier
}
}
*converged = (notconvergedl == 0);
return total_it;
}
|
mre.c | /*
Copyright (c) 2009-2011, Jun Namikawa <jnamika@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "utils.h"
#include "mre.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#ifndef FIXED_GATE
#define FIXED_GATE 0
#endif
#ifndef INIT_GAMMA
#define INIT_GAMMA 10
#endif
#ifdef ENABLE_ADAPTIVE_LEARNING_RATE
#ifndef MAX_ITERATION_IN_ADAPTIVE_LR
#define MAX_ITERATION_IN_ADAPTIVE_LR 1000
#endif
#ifndef MAX_PERF_INC
#define MAX_PERF_INC 1.1
#endif
#ifndef LR_DEC
#define LR_DEC 0.7
#endif
#ifndef LR_INC
#define LR_INC 1.05
#endif
#endif // ENABLE_ADAPTIVE_LEARNING_RATE
/******************************************************************************/
/********** Initialization and Free *******************************************/
/******************************************************************************/
void init_mre_state (
struct mre_state *mre_s,
struct mixture_of_rnn_experts *mre,
int length)
{
assert(length > 0);
mre_s->mre = mre;
mre_s->length = length;
mre_state_alloc(mre_s);
for (int i = 0; i < mre->expert_num; i++) {
for (int n = 0; n < length; n++) {
mre_s->gate[i][n] = 1.0/(double)mre->expert_num;
mre_s->beta[i][n] = 0;
mre_s->delta_beta[i][n] = 0;
}
mre_s->expert_rnn_s[i] = NULL;
}
}
void init_mixture_of_rnn_experts (
struct mixture_of_rnn_experts *mre,
int expert_num,
int in_state_size,
int c_state_size,
int out_state_size)
{
/*
* Mixture of RNN experts has to contain at least one expert RNN.
* Each RNN expert has to contain at least one context neuron and one output
* neuron.
* An input neuron is not necessarily required.
*/
assert(expert_num >= 1);
assert(in_state_size >= 0);
assert(c_state_size >= 1);
assert(out_state_size >= 1);
mre->expert_num = expert_num;
mre->series_num = 0;
mre->in_state_size = in_state_size;
mre->out_state_size = out_state_size;
mre->fixed_gate = FIXED_GATE;
mre->gate_prior_distribution = GAUSS_DISTRIBUTION;
mre->mre_s = NULL;
MALLOC(mre->gamma, expert_num);
MALLOC(mre->expert_rnn, expert_num);
for (int i = 0; i < expert_num; i++) {
mre->gamma[i] = INIT_GAMMA;
init_recurrent_neural_network(mre->expert_rnn + i,
in_state_size, c_state_size, out_state_size);
}
}
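/*
Illustrative usage sketch (hypothetical sizes): build a mixture of 4 expert
RNNs, each with 2 input neurons, 10 context neurons and 2 output neurons:
struct mixture_of_rnn_experts mre;
init_mixture_of_rnn_experts(&mre, 4, 2, 10, 2);
... add targets, train, ...
free_mixture_of_rnn_experts(&mre);
*/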
void mre_add_target (
struct mixture_of_rnn_experts *mre,
int length,
const double* const* input,
const double* const* target)
{
mre->series_num++;
REALLOC(mre->mre_s, mre->series_num);
init_mre_state(mre->mre_s + (mre->series_num-1), mre, length);
for (int i = 0; i < mre->expert_num; i++) {
rnn_add_target(mre->expert_rnn + i, length, input, target);
for (int j = 0; j < mre->series_num; j++) {
mre->mre_s[j].expert_rnn_s[i] = mre->expert_rnn[i].rnn_s + j;
}
}
}
void mre_clean_target (struct mixture_of_rnn_experts *mre)
{
for (int i = 0; i < mre->expert_num; i++) {
rnn_clean_target(mre->expert_rnn + i);
}
for (int i = 0; i < mre->series_num; i++) {
free_mre_state(mre->mre_s + i);
}
FREE(mre->mre_s);
mre->series_num = 0;
}
void mre_state_alloc (struct mre_state *mre_s)
{
const int expert_num = mre_s->mre->expert_num;
const int out_state_size = mre_s->mre->out_state_size;
const int length = mre_s->length;
MALLOC(mre_s->joint_likelihood, length);
MALLOC2(mre_s->gate, expert_num, length);
MALLOC2(mre_s->beta, expert_num, length);
MALLOC2(mre_s->delta_beta, expert_num, length);
MALLOC2(mre_s->generation_likelihood, expert_num, length);
MALLOC2(mre_s->discrimination_likelihood, expert_num, length);
MALLOC2(mre_s->prior_likelihood, expert_num, length);
MALLOC2(mre_s->out_state, length, out_state_size);
MALLOC(mre_s->expert_rnn_s, expert_num);
#ifdef ENABLE_ADAPTIVE_LEARNING_RATE
MALLOC(mre_s->tmp_gate, length * expert_num);
MALLOC(mre_s->tmp_beta, length * expert_num);
#endif
}
void free_mre_state (struct mre_state *mre_s)
{
FREE(mre_s->joint_likelihood);
FREE2(mre_s->gate);
FREE2(mre_s->beta);
FREE2(mre_s->delta_beta);
FREE2(mre_s->generation_likelihood);
FREE2(mre_s->discrimination_likelihood);
FREE2(mre_s->prior_likelihood);
FREE2(mre_s->out_state);
FREE(mre_s->expert_rnn_s);
#ifdef ENABLE_ADAPTIVE_LEARNING_RATE
FREE(mre_s->tmp_gate);
FREE(mre_s->tmp_beta);
#endif
}
void free_mixture_of_rnn_experts (struct mixture_of_rnn_experts *mre)
{
FREE(mre->gamma);
for (int i = 0; i < mre->expert_num; i++) {
free_recurrent_neural_network(mre->expert_rnn + i);
}
FREE(mre->expert_rnn);
for (int i = 0; i < mre->series_num; i++) {
free_mre_state(mre->mre_s + i);
}
FREE(mre->mre_s);
mre->series_num = 0;
}
/******************************************************************************/
/********** File IO ***********************************************************/
/******************************************************************************/
void fwrite_mre_state (
const struct mre_state *mre_s,
FILE *fp)
{
FWRITE(&mre_s->length, 1, fp);
for (int i = 0; i < mre_s->mre->expert_num; i++) {
FWRITE(mre_s->gate[i], mre_s->length, fp);
FWRITE(mre_s->beta[i], mre_s->length, fp);
FWRITE(mre_s->delta_beta[i], mre_s->length, fp);
}
}
void fread_mre_state (
struct mre_state *mre_s,
FILE *fp)
{
FREAD(&mre_s->length, 1, fp);
mre_state_alloc(mre_s);
for (int i = 0; i < mre_s->mre->expert_num; i++) {
FREAD(mre_s->gate[i], mre_s->length, fp);
FREAD(mre_s->beta[i], mre_s->length, fp);
FREAD(mre_s->delta_beta[i], mre_s->length, fp);
}
}
void fwrite_mixture_of_rnn_experts (
const struct mixture_of_rnn_experts *mre,
FILE *fp)
{
FWRITE(&mre->expert_num, 1, fp);
FWRITE(&mre->series_num, 1, fp);
FWRITE(&mre->in_state_size, 1, fp);
FWRITE(&mre->out_state_size, 1, fp);
FWRITE(&mre->fixed_gate, 1, fp);
FWRITE(&mre->gate_prior_distribution, 1, fp);
FWRITE(mre->gamma, mre->expert_num, fp);
for (int i = 0; i < mre->expert_num; i++) {
fwrite_recurrent_neural_network(mre->expert_rnn + i, fp);
}
for (int i = 0; i < mre->series_num; i++) {
fwrite_mre_state(mre->mre_s + i, fp);
}
}
void fread_mixture_of_rnn_experts (
struct mixture_of_rnn_experts *mre,
FILE *fp)
{
FREAD(&mre->expert_num, 1, fp);
FREAD(&mre->series_num, 1, fp);
FREAD(&mre->in_state_size, 1, fp);
FREAD(&mre->out_state_size, 1, fp);
FREAD(&mre->fixed_gate, 1, fp);
FREAD(&mre->gate_prior_distribution, 1, fp);
MALLOC(mre->gamma, mre->expert_num);
MALLOC(mre->expert_rnn, mre->expert_num);
MALLOC(mre->mre_s, mre->series_num);
FREAD(mre->gamma, mre->expert_num, fp);
for (int i = 0; i < mre->expert_num; i++) {
fread_recurrent_neural_network(mre->expert_rnn + i, fp);
}
for (int i = 0; i < mre->series_num; i++) {
mre->mre_s[i].mre = mre;
fread_mre_state(mre->mre_s + i, fp);
for (int j = 0; j < mre->expert_num; j++) {
mre->mre_s[i].expert_rnn_s[j] = mre->expert_rnn[j].rnn_s + i;
}
}
}
/******************************************************************************/
/********** Computation of Mixture-of-RNN-Experts *****************************/
/******************************************************************************/
int mre_get_total_length (const struct mixture_of_rnn_experts *mre)
{
int total_length = 0;
for (int i = 0; i < mre->series_num; i++) {
total_length += mre->mre_s[i].length;
}
return total_length;
}
double mre_get_error (const struct mre_state *mre_s)
{
double error = 0;
for (int n = 0; n < mre_s->length; n++) {
for (int i = 0; i < mre_s->mre->out_state_size; i++) {
double d = mre_s->out_state[n][i] -
mre_s->expert_rnn_s[0]->teach_state[n][i];
error += 0.5 * d * d;
}
}
return error;
}
double mre_get_total_error (const struct mixture_of_rnn_experts *mre)
{
double error[mre->series_num];
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < mre->series_num; i++) {
error[i] = mre_get_error(mre->mre_s + i);
}
double total_error = 0;
for (int i = 0; i < mre->series_num; i++) {
total_error += error[i];
}
return total_error;
}
double mre_get_joint_likelihood (const struct mre_state *mre_s)
{
double likelihood;
likelihood = 0;
for (int n = 0; n < mre_s->length; n++) {
likelihood += log(mre_s->joint_likelihood[n]);
}
return likelihood;
}
double mre_get_total_joint_likelihood (const struct mixture_of_rnn_experts *mre)
{
double likelihood[mre->series_num];
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < mre->series_num; i++) {
likelihood[i] = mre_get_joint_likelihood(mre->mre_s + i);
}
double total_likelihood = 0;
for (int i = 0; i < mre->series_num; i++) {
total_likelihood += likelihood[i];
}
return total_likelihood;
}
double mre_get_prior_likelihood (const struct mre_state *mre_s)
{
double likelihood;
likelihood = 0;
for (int n = 0; n < mre_s->length; n++) {
for (int i = 0; i < mre_s->mre->expert_num; i++) {
likelihood += log(mre_s->prior_likelihood[i][n]);
}
}
return likelihood;
}
double mre_get_total_prior_likelihood (const struct mixture_of_rnn_experts *mre)
{
double likelihood[mre->series_num];
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < mre->series_num; i++) {
likelihood[i] = mre_get_prior_likelihood(mre->mre_s + i);
}
double total_likelihood = 0;
for (int i = 0; i < mre->series_num; i++) {
total_likelihood += likelihood[i];
}
return total_likelihood;
}
static void set_out_state_from_expert_rnns (struct mre_state *mre_s)
{
const int expert_num = mre_s->mre->expert_num;
const int length = mre_s->length;
const int out_state_size = mre_s->mre->out_state_size;
for (int n = 0; n < length; n++) {
for (int i = 0; i < out_state_size; i++) {
mre_s->out_state[n][i] = 0;
for (int j = 0; j < expert_num; j++) {
mre_s->out_state[n][i] += mre_s->gate[j][n] *
mre_s->expert_rnn_s[j]->out_state[n][i];
}
}
}
}
void mre_forward_dynamics (struct mre_state *mre_s)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < mre_s->mre->expert_num; i++) {
rnn_forward_dynamics(mre_s->expert_rnn_s[i]);
}
set_out_state_from_expert_rnns(mre_s);
}
void mre_forward_dynamics_forall (struct mixture_of_rnn_experts *mre)
{
#ifdef _OPENMP
const int total_num = mre->series_num * mre->expert_num;
#pragma omp parallel
{
#pragma omp for
for (int n = 0; n < total_num; n++) {
int i = n / mre->expert_num;
int j = n % mre->expert_num;
rnn_forward_dynamics(mre->mre_s[i].expert_rnn_s[j]);
}
#pragma omp for
for (int i = 0; i < mre->series_num; i++) {
set_out_state_from_expert_rnns(mre->mre_s + i);
}
}
#else
for (int i = 0; i < mre->series_num; i++) {
mre_forward_dynamics(mre->mre_s + i);
}
#endif
}
void mre_forward_dynamics_in_closed_loop (
struct mre_state *mre_s,
int delay_length)
{
struct rnn_state *rnn_s;
assert(mre_s->mre->in_state_size <= mre_s->mre->out_state_size);
for (int n = 0; n < mre_s->length; n++) {
if (n == 0) {
for (int i = 0; i < mre_s->mre->expert_num; i++) {
rnn_s = mre_s->expert_rnn_s[i];
rnn_forward_map(rnn_s->rnn_p, rnn_s->in_state[0],
rnn_s->init_c_inter_state, rnn_s->init_c_state,
rnn_s->c_inputsum[0], rnn_s->c_inter_state[0],
rnn_s->c_state[0], rnn_s->o_inter_state[0],
rnn_s->out_state[0]);
}
} else if (n < delay_length) {
for (int i = 0; i < mre_s->mre->expert_num; i++) {
rnn_s = mre_s->expert_rnn_s[i];
rnn_forward_map(rnn_s->rnn_p, rnn_s->in_state[n],
rnn_s->c_inter_state[n-1], rnn_s->c_state[n-1],
rnn_s->c_inputsum[n], rnn_s->c_inter_state[n],
rnn_s->c_state[n], rnn_s->o_inter_state[n],
rnn_s->out_state[n]);
}
} else {
for (int i = 0; i < mre_s->mre->expert_num; i++) {
rnn_s = mre_s->expert_rnn_s[i];
rnn_forward_map(rnn_s->rnn_p, mre_s->out_state[n-delay_length],
rnn_s->c_inter_state[n-1], rnn_s->c_state[n-1],
rnn_s->c_inputsum[n], rnn_s->c_inter_state[n],
rnn_s->c_state[n], rnn_s->o_inter_state[n],
rnn_s->out_state[n]);
}
}
for (int i = 0; i < mre_s->mre->out_state_size; i++) {
mre_s->out_state[n][i] = 0;
for (int j = 0; j < mre_s->mre->expert_num; j++) {
mre_s->out_state[n][i] += mre_s->gate[j][n] *
mre_s->expert_rnn_s[j]->out_state[n][i];
}
}
}
}
void mre_forward_dynamics_in_closed_loop_forall (
struct mixture_of_rnn_experts *mre,
int delay_length)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < mre->series_num; i++) {
mre_forward_dynamics_in_closed_loop(mre->mre_s + i, delay_length);
}
}
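/*
gauss_func() below evaluates an isotropic multivariate Gaussian density with
mean y and per-coordinate variance v over `dimension` coordinates:
p(x) = exp(-||x - y||^2 / (2 v)) / (2 pi v)^(dimension / 2)
*/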
static double gauss_func (
const double *x,
const double *y,
int dimension,
double variance)
{
double sum = 0;
for (int i = 0; i < dimension; i++) {
double d = x[i] - y[i];
sum += d * d;
}
return exp((-sum) / (2*variance)) / pow((2*M_PI)*variance, dimension/2.0);
}
static inline void gmap (
const struct rnn_state *rnn_s,
double *generation_likelihood)
{
const int length = rnn_s->length;
for (int n = 0; n < length; n++) {
generation_likelihood[n] = gauss_func(rnn_s->out_state[n],
rnn_s->teach_state[n], rnn_s->rnn_p->out_state_size,
rnn_s->rnn_p->variance);
}
}
void mre_set_generation_likelihood (struct mre_state *mre_s)
{
const int expert_num = mre_s->mre->expert_num;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < expert_num; i++) {
gmap(mre_s->expert_rnn_s[i], mre_s->generation_likelihood[i]);
}
}
void mre_set_joint_likelihood (struct mre_state *mre_s)
{
const int expert_num = mre_s->mre->expert_num;
const int length = mre_s->length;
for (int n = 0; n < length; n++) {
mre_s->joint_likelihood[n] = 0;
for (int i = 0; i < expert_num; i++) {
mre_s->joint_likelihood[n] += mre_s->gate[i][n] *
mre_s->generation_likelihood[i][n];
}
}
}
static inline void dmap (
const int length,
const double * const restrict gate,
const double * const restrict generation_likelihood,
const double * const restrict joint_likelihood,
double * restrict discrimination_likelihood)
{
for (int n = 0; n < length; n++) {
discrimination_likelihood[n] = (gate[n] * generation_likelihood[n]) /
joint_likelihood[n];
}
}
void mre_set_discrimination_likelihood (struct mre_state *mre_s)
{
const int expert_num = mre_s->mre->expert_num;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < expert_num; i++) {
dmap(mre_s->length, mre_s->gate[i], mre_s->generation_likelihood[i],
mre_s->joint_likelihood, mre_s->discrimination_likelihood[i]);
}
}
static inline void pmap (
const enum gate_distribution_t gate_prior_distribution,
const int length,
const double gamma,
const double * const restrict beta,
double * restrict prior_likelihood)
{
switch (gate_prior_distribution) {
case NO_DISTRIBUTION:
for (int n = 0; n < length; n++) {
prior_likelihood[n] = 1;
}
break;
case GAUSS_DISTRIBUTION:
{
double per_gamma2 = 1.0 / (gamma * gamma);
double c = 1.0 / (sqrt(2*M_PI) * gamma);
prior_likelihood[0] = 1;
for (int n = 1; n < length; n++) {
double x = beta[n] - beta[n-1];
x = x * x;
prior_likelihood[n] = exp(-0.5 * x * per_gamma2) * c;
}
}
break;
case CAUCHY_DISTRIBUTION:
{
double gamma2 = gamma * gamma;
prior_likelihood[0] = 1;
for (int n = 1; n < length; n++) {
double x = beta[n] - beta[n-1];
x = x * x;
prior_likelihood[n] = gamma / (M_PI * (x + gamma2));
}
}
break;
}
}
void mre_set_prior_likelihood (struct mre_state *mre_s)
{
const struct mixture_of_rnn_experts *mre = mre_s->mre;
const int expert_num = mre->expert_num;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < expert_num; i++) {
pmap(mre->gate_prior_distribution, mre_s->length, mre->gamma[i],
mre_s->beta[i], mre_s->prior_likelihood[i]);
}
}
void mre_set_likelihood_of_expert (
struct rnn_state *rnn_s,
double *discrimination_likelihood)
{
const int length = rnn_s->length;
const int out_state_size = rnn_s->rnn_p->out_state_size;
rnn_set_likelihood(rnn_s);
for (int n = 0; n < length; n++) {
for (int i = 0; i < out_state_size; i++) {
rnn_s->delta_likelihood[n][i] *= discrimination_likelihood[n];
rnn_s->likelihood[n][i] *= discrimination_likelihood[n];
}
}
}
void mre_set_likelihood (struct mre_state *mre_s)
{
mre_set_generation_likelihood(mre_s);
mre_set_joint_likelihood(mre_s);
mre_set_discrimination_likelihood(mre_s);
mre_set_prior_likelihood(mre_s);
const int expert_num = mre_s->mre->expert_num;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < expert_num; i++) {
mre_set_likelihood_of_expert(mre_s->expert_rnn_s[i],
mre_s->discrimination_likelihood[i]);
}
}
void mre_set_likelihood_forall (struct mixture_of_rnn_experts *mre)
{
#ifdef _OPENMP
const int total_num = mre->series_num * mre->expert_num;
#pragma omp parallel
{
#pragma omp for
for (int n = 0; n < total_num; n++) {
int i = n / mre->expert_num;
int j = n % mre->expert_num;
gmap(mre->mre_s[i].expert_rnn_s[j],
mre->mre_s[i].generation_likelihood[j]);
}
#pragma omp for
for (int i = 0; i < mre->series_num; i++) {
mre_set_joint_likelihood(mre->mre_s + i);
}
#pragma omp for
for (int n = 0; n < total_num; n++) {
int i = n / mre->expert_num;
int j = n % mre->expert_num;
dmap(mre->mre_s[i].length, mre->mre_s[i].gate[j],
mre->mre_s[i].generation_likelihood[j],
mre->mre_s[i].joint_likelihood,
mre->mre_s[i].discrimination_likelihood[j]);
pmap(mre->gate_prior_distribution, mre->mre_s[i].length,
mre->gamma[j], mre->mre_s[i].beta[j],
mre->mre_s[i].prior_likelihood[j]);
mre_set_likelihood_of_expert(mre->mre_s[i].expert_rnn_s[j],
mre->mre_s[i].discrimination_likelihood[j]);
}
}
#else
for (int i = 0; i < mre->series_num; i++) {
mre_set_likelihood(mre->mre_s + i);
}
#endif
}
void mre_backward_dynamics (struct mre_state *mre_s)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < mre_s->mre->expert_num; i++) {
rnn_backward_dynamics(mre_s->expert_rnn_s[i]);
}
}
void mre_backward_dynamics_forall (struct mixture_of_rnn_experts *mre)
{
#ifdef _OPENMP
const int total_num = mre->series_num * mre->expert_num;
#pragma omp parallel for
for (int n = 0; n < total_num; n++) {
int i = n / mre->expert_num;
int j = n % mre->expert_num;
rnn_backward_dynamics(mre->mre_s[i].expert_rnn_s[j]);
}
#else
for (int i = 0; i < mre->series_num; i++) {
mre_backward_dynamics(mre->mre_s + i);
}
#endif
}
void mre_forward_backward_dynamics (struct mre_state *mre_s)
{
mre_forward_dynamics(mre_s);
mre_set_likelihood(mre_s);
mre_backward_dynamics(mre_s);
}
void mre_forward_backward_dynamics_forall (struct mixture_of_rnn_experts *mre)
{
mre_forward_dynamics_forall(mre);
mre_set_likelihood_forall(mre);
mre_backward_dynamics_forall(mre);
}
void mre_update_delta_beta (
struct mre_state *mre_s,
double momentum)
{
double sum, per_gamma2 = 0, gamma2 = 0;
const struct mixture_of_rnn_experts *mre = mre_s->mre;
const int expert_num = mre->expert_num;
const int length = mre_s->length;
const enum gate_distribution_t gate_prior_distribution =
mre->gate_prior_distribution;
for (int i = 0; i < expert_num; i++) {
switch (gate_prior_distribution) {
case NO_DISTRIBUTION:
break;
case GAUSS_DISTRIBUTION:
per_gamma2 = 1.0 / (mre->gamma[i] * mre->gamma[i]);
break;
case CAUCHY_DISTRIBUTION:
gamma2 = mre->gamma[i] * mre->gamma[i];
break;
}
for (int n = 0; n < length; n++) {
double delta = (mre_s->gate[i][n] / mre_s->joint_likelihood[n]) *
(mre_s->generation_likelihood[i][n] -
mre_s->joint_likelihood[n]);
switch (gate_prior_distribution) {
case NO_DISTRIBUTION:
break;
case GAUSS_DISTRIBUTION:
sum = 0;
if (n < length - 1) {
sum += (mre_s->beta[i][n+1] - mre_s->beta[i][n]);
}
if (n > 0) {
sum += -(mre_s->beta[i][n] - mre_s->beta[i][n-1]);
}
delta += sum * per_gamma2;
break;
case CAUCHY_DISTRIBUTION:
sum = 0;
if (n < length - 1) {
double d = mre_s->beta[i][n+1] - mre_s->beta[i][n];
sum += (2 * d) / (d * d + gamma2);
}
if (n > 0) {
double d = mre_s->beta[i][n] - mre_s->beta[i][n-1];
sum += -(2 * d) / (d * d + gamma2);
}
delta += sum;
break;
}
mre_s->delta_beta[i][n] = delta + momentum *
mre_s->delta_beta[i][n];
}
}
}
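/*
mre_update_beta_and_gate() below applies the gradient step to beta and then
recomputes the gate opening values as the softmax of beta:
gate[i][n] = exp(beta[i][n]) / sum_j exp(beta[j][n])
so the gates of all experts at each time step stay positive and sum to 1.
*/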
void mre_update_beta_and_gate (
struct mre_state *mre_s,
double rho)
{
for (int n = 0; n < mre_s->length; n++) {
for (int i = 0; i < mre_s->mre->expert_num; i++) {
mre_s->beta[i][n] += (rho * mre_s->delta_beta[i][n]);
assert(isfinite(mre_s->beta[i][n]));
}
}
for (int n = 0; n < mre_s->length; n++) {
double sum = 0;
for (int i = 0; i < mre_s->mre->expert_num; i++) {
sum += exp(mre_s->beta[i][n]);
}
for (int i = 0; i < mre_s->mre->expert_num; i++) {
mre_s->gate[i][n] = exp(mre_s->beta[i][n]) / sum;
}
}
}
void mre_update_delta_parameters (
struct mixture_of_rnn_experts *mre,
double momentum)
{
for (int i = 0; i < mre->expert_num; i++) {
rnn_update_delta_parameters(mre->expert_rnn + i, momentum);
}
if (!mre->fixed_gate) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < mre->series_num; i++) {
mre_update_delta_beta(mre->mre_s + i, momentum);
}
}
}
void mre_update_parameters (
struct mixture_of_rnn_experts *mre,
double rho_gate,
double rho_weight,
double rho_tau,
double rho_init,
double rho_sigma)
{
for (int i = 0; i < mre->expert_num; i++) {
rnn_update_parameters(mre->expert_rnn + i, rho_weight, rho_tau,
rho_init, rho_sigma);
}
if (!mre->fixed_gate) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < mre->series_num; i++) {
mre_update_beta_and_gate(mre->mre_s + i, rho_gate);
}
}
}
/*
* This function computes learning of a mixture of rnn experts
*
* @parameter mre : mixture of rnn experts
* @parameter rho_gate : learning rate for gate opening values
* @parameter rho_weight : learning rate for weights and thresholds
* @parameter rho_tau : learning rate for tau
* @parameter rho_init : learning rate for initial states
* @parameter rho_sigma : learning rate for sigma
* @parameter momentum : momentum of learning
*/
void mre_learn (
struct mixture_of_rnn_experts *mre,
double rho_gate,
double rho_weight,
double rho_tau,
double rho_init,
double rho_sigma,
double momentum)
{
mre_forward_backward_dynamics_forall(mre);
mre_update_delta_parameters(mre, momentum);
mre_update_parameters(mre, rho_gate, rho_weight, rho_tau, rho_init,
rho_sigma);
}
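/*
Illustrative training-loop sketch (hypothetical learning rates and epoch
count; assumes targets were registered with mre_add_target() beforehand):
for (int epoch = 0; epoch < 1000; epoch++) {
mre_learn(&mre, 1e-3, 1e-5, 1e-5, 1e-3, 1e-5, 0.9);
printf("epoch %d: error = %f\n", epoch, mre_get_total_error(&mre));
}
*/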
/*
* This function computes learning of a mixture of rnn experts
* (support automatic scaling of learning rate)
*
* @parameter mre : mixture of rnn experts
* @parameter rho : learning rate
* @parameter momentum : momentum of learning
*/
void mre_learn_s (
struct mixture_of_rnn_experts *mre,
double rho,
double momentum)
{
double r = 1.0 / (mre_get_total_length(mre) * mre->out_state_size);
double rho_weight = r * rho;
double rho_tau = r * rho;
double rho_sigma = r * rho;
double rho_init = rho / mre->out_state_size;
mre_learn(mre, rho, rho_weight, rho_tau, rho_init, rho_sigma, momentum);
}
#ifdef ENABLE_ADAPTIVE_LEARNING_RATE
void mre_backup_learning_parameters (struct mixture_of_rnn_experts *mre)
{
for (int i = 0; i < mre->expert_num; i++) {
rnn_backup_learning_parameters(mre->expert_rnn + i);
}
for (int i = 0; i < mre->series_num; i++) {
struct mre_state *mre_s = mre->mre_s + i;
memmove(mre_s->tmp_gate, mre_s->gate[0], sizeof(double) *
mre_s->length * mre->expert_num);
memmove(mre_s->tmp_beta, mre_s->beta[0], sizeof(double) *
mre_s->length * mre->expert_num);
}
}
void mre_restore_learning_parameters (struct mixture_of_rnn_experts *mre)
{
for (int i = 0; i < mre->expert_num; i++) {
rnn_restore_learning_parameters(mre->expert_rnn + i);
}
for (int i = 0; i < mre->series_num; i++) {
struct mre_state *mre_s = mre->mre_s + i;
memmove(mre_s->gate[0], mre_s->tmp_gate, sizeof(double) *
mre_s->length * mre->expert_num);
memmove(mre_s->beta[0], mre_s->tmp_beta, sizeof(double) *
mre_s->length * mre->expert_num);
}
}
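/*
mre_update_parameters_with_adapt_lr() below implements a simple backtracking
scheme: apply the update scaled by adapt_lr; if the total error grows by more
than a factor of MAX_PERF_INC (or becomes NaN), restore the saved parameters
and shrink adapt_lr by LR_DEC; otherwise keep the step and, if the error
decreased, grow adapt_lr by LR_INC.
*/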
double mre_update_parameters_with_adapt_lr (
struct mixture_of_rnn_experts *mre,
double adapt_lr,
double rho_gate,
double rho_weight,
double rho_tau,
double rho_init,
double rho_sigma)
{
double current_error = mre_get_total_error(mre);
mre_backup_learning_parameters(mre);
for (int count = 0; count < MAX_ITERATION_IN_ADAPTIVE_LR; count++) {
mre_update_parameters(mre, rho_gate * adapt_lr, rho_weight * adapt_lr,
rho_tau * adapt_lr, rho_init * adapt_lr, rho_sigma * adapt_lr);
mre_forward_dynamics_forall(mre);
double next_error = mre_get_total_error(mre);
double rate = next_error / current_error;
if (rate > MAX_PERF_INC || isnan(rate)) {
mre_restore_learning_parameters(mre);
adapt_lr *= LR_DEC;
} else {
if (rate < 1) {
adapt_lr *= LR_INC;
}
break;
}
}
return adapt_lr;
}
/*
* This function computes learning of a mixture of rnn experts
* (support adaptive learning rate)
*
* @parameter mre : mixture of rnn experts
* @parameter adapt_lr : adaptive learning rate
* @parameter rho_gate : learning rate for gate opening values
* @parameter rho_weight : learning rate for weights and thresholds
* @parameter rho_tau : learning rate for tau
* @parameter rho_init : learning rate for initial states
* @parameter rho_sigma : learning rate for sigma
* @parameter momentum : momentum of learning
*
* @return : adaptive learning rate
*/
double mre_learn_with_adapt_lr (
struct mixture_of_rnn_experts *mre,
double adapt_lr,
double rho_gate,
double rho_weight,
double rho_tau,
double rho_init,
double rho_sigma,
double momentum)
{
mre_forward_backward_dynamics_forall(mre);
mre_update_delta_parameters(mre, momentum);
return mre_update_parameters_with_adapt_lr(mre, adapt_lr, rho_gate,
rho_weight, rho_tau, rho_init, rho_sigma);
}
/*
* This function computes learning of a mixture of rnn experts
* (support adaptive learning rate and automatic scaling of learning rate)
*
* @parameter mre : mixture of rnn experts
* @parameter adapt_lr : adaptive learning rate
* @parameter rho : learning rate
* @parameter momentum : momentum of learning
*/
double mre_learn_s_with_adapt_lr (
struct mixture_of_rnn_experts *mre,
double adapt_lr,
double rho,
double momentum)
{
double r = 1.0 / (mre_get_total_length(mre) * mre->out_state_size);
double rho_weight = r * rho;
double rho_tau = r * rho;
double rho_sigma = r * rho;
double rho_init = rho / mre->out_state_size;
return mre_learn_with_adapt_lr(mre, adapt_lr, rho, rho_weight, rho_tau,
rho_init, rho_sigma, momentum);
}
#endif // ENABLE_ADAPTIVE_LEARNING_RATE
void mre_update_prior_strength (
struct mixture_of_rnn_experts *mre,
double lambda,
double alpha)
{
double likelihood;
struct rnn_parameters *rnn_p;
mre_forward_dynamics_forall(mre);
mre_set_likelihood_forall(mre);
for (int i = 0; i < mre->expert_num; i++) {
likelihood = 0;
for (int j = 0; j < mre->series_num; j++) {
for (int n = 0; n < mre->mre_s[j].length; n++) {
likelihood += mre->mre_s[j].discrimination_likelihood[i][n] *
mre->mre_s[j].joint_likelihood[n];
}
}
rnn_p = &mre->expert_rnn[i].rnn_p;
rnn_p->prior_strength = lambda * rnn_p->prior_strength +
alpha * likelihood;
rnn_reset_prior_distribution(rnn_p);
}
}
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/resize.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
const OrientationType orientation,ExceptionInfo *exception)
{
Image
*orient_image;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
orient_image=(Image *) NULL;
switch(orientation)
{
case UndefinedOrientation:
case TopLeftOrientation:
default:
{
orient_image=CloneImage(image,0,0,MagickTrue,exception);
break;
}
case TopRightOrientation:
{
orient_image=FlopImage(image,exception);
break;
}
case BottomRightOrientation:
{
orient_image=RotateImage(image,180.0,exception);
break;
}
case BottomLeftOrientation:
{
orient_image=FlipImage(image,exception);
break;
}
case LeftTopOrientation:
{
orient_image=TransposeImage(image,exception);
break;
}
case RightTopOrientation:
{
orient_image=RotateImage(image,90.0,exception);
break;
}
case RightBottomOrientation:
{
orient_image=TransverseImage(image,exception);
break;
}
case LeftBottomOrientation:
{
orient_image=RotateImage(image,270.0,exception);
break;
}
}
if (orient_image != (Image *) NULL)
orient_image->orientation=TopLeftOrientation;
return(orient_image);
}
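/*
Illustrative usage sketch (hypothetical variables): normalize an image whose
EXIF orientation was recorded at read time in image->orientation:
Image *oriented = AutoOrientImage(image, image->orientation, exception);
if (oriented != (Image *) NULL)
{ image=DestroyImage(image); image=oriented; }
*/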
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"
CacheView
*chop_view,
*image_view;
Image
*chop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
extent;
ssize_t
y;
/*
Check chop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
assert(chop_info != (RectangleInfo *) NULL);
if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
((chop_info->y+(ssize_t) chop_info->height) < 0) ||
(chop_info->x > (ssize_t) image->columns) ||
(chop_info->y > (ssize_t) image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
extent=(*chop_info);
if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
extent.width=(size_t) ((ssize_t) image->columns-extent.x);
if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
extent.height=(size_t) ((ssize_t) image->rows-extent.y);
if (extent.x < 0)
{
extent.width-=(size_t) (-extent.x);
extent.x=0;
}
if (extent.y < 0)
{
extent.height-=(size_t) (-extent.y);
extent.y=0;
}
chop_image=CloneImage(image,image->columns-extent.width,image->rows-
extent.height,MagickTrue,exception);
if (chop_image == (Image *) NULL)
return((Image *) NULL);
/*
Extract chop image: first the rows above the chop region.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,chop_image,1,1)
#endif
for (y=0; y < (ssize_t) extent.y; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict chop_indexes,
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=GetPixelIndex(indexes+x);
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ChopImage)
#endif
proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
/*
Extract the rows below the chop region.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,chop_image,1,1)
#endif
for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict chop_indexes,
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
1,exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=GetPixelIndex(indexes+x);
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ChopImage)
#endif
proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
chop_view=DestroyCacheView(chop_view);
image_view=DestroyCacheView(image_view);
chop_image->type=image->type;
if (status == MagickFalse)
chop_image=DestroyImage(chop_image);
return(chop_image);
}
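/*
  A minimal usage sketch (assuming `image' and `exception' are valid and
  initialized): remove a 20x10 region anchored at (5,5); the rows and
  columns around it collapse inward, leaving a smaller image.

    RectangleInfo chop_info;
    Image *chop_image;

    chop_info.width=20;
    chop_info.height=10;
    chop_info.x=5;
    chop_info.y=5;
    chop_image=ChopImage(image,&chop_info,exception);
    if (chop_image != (Image *) NULL)
      chop_image=DestroyImage(chop_image);
*/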
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
% Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
ExceptionInfo *exception)
{
CacheView
*cmyk_view,
*image_view;
Image
*cmyk_image,
*cmyk_images;
register ssize_t
i;
ssize_t
y;
/*
Consolidate separate C, M, Y, and K planes into a single image.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
cmyk_images=NewImageList();
for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
{
cmyk_image=CloneImage(images,images->columns,images->rows,MagickTrue,
exception);
if (cmyk_image == (Image *) NULL)
break;
if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
break;
(void) SetImageColorspace(cmyk_image,CMYKColorspace);
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
x;
register PixelPacket
*restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
x;
register PixelPacket
*restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelGreen(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*restrict p;
register ssize_t
x;
register PixelPacket
*restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelBlue(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange-
GetPixelIntensity(images,p)));
p++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
AppendImageToList(&cmyk_images,cmyk_image);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
}
return(cmyk_images);
}
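/*
  A minimal usage sketch (assuming `planes' is an image list holding
  grayscale C, M, Y, and K separations in that order, and `exception' is
  initialized); every group of four planes yields one CMYK image in the
  returned list:

    Image *cmyk;

    cmyk=ConsolidateCMYKImages(planes,exception);
    if (cmyk != (Image *) NULL)
      cmyk=DestroyImageList(cmyk);
*/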
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"
CacheView
*crop_view,
*image_view;
Image
*crop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
bounding_box,
page;
ssize_t
y;
/*
Check crop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
bounding_box=image->page;
if ((bounding_box.width == 0) || (bounding_box.height == 0))
{
bounding_box.width=image->columns;
bounding_box.height=image->rows;
}
page=(*geometry);
if (page.width == 0)
page.width=bounding_box.width;
if (page.height == 0)
page.height=bounding_box.height;
if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
((bounding_box.y-page.y) >= (ssize_t) page.height) ||
((page.x-bounding_box.x) > (ssize_t) image->columns) ||
((page.y-bounding_box.y) > (ssize_t) image->rows))
{
/*
Crop is not within virtual canvas, return 1 pixel transparent image.
*/
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(crop_image);
crop_image->page=bounding_box;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
if (crop_image->dispose == BackgroundDispose)
crop_image->dispose=NoneDispose;
return(crop_image);
}
if ((page.x < 0) && (bounding_box.x >= 0))
{
page.width+=page.x-bounding_box.x;
page.x=0;
}
else
{
page.width-=bounding_box.x-page.x;
page.x-=bounding_box.x;
if (page.x < 0)
page.x=0;
}
if ((page.y < 0) && (bounding_box.y >= 0))
{
page.height+=page.y-bounding_box.y;
page.y=0;
}
else
{
page.height-=bounding_box.y-page.y;
page.y-=bounding_box.y;
if (page.y < 0)
page.y=0;
}
if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
page.width=image->columns-page.x;
if ((geometry->width != 0) && (page.width > geometry->width))
page.width=geometry->width;
if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
page.height=image->rows-page.y;
if ((geometry->height != 0) && (page.height > geometry->height))
page.height=geometry->height;
bounding_box.x+=page.x;
bounding_box.y+=page.y;
if ((page.width == 0) || (page.height == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return((Image *) NULL);
}
/*
Initialize crop image attributes.
*/
crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->page.width=image->page.width;
crop_image->page.height=image->page.height;
if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) ||
((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height))
{
crop_image->page.width=bounding_box.width;
crop_image->page.height=bounding_box.height;
}
crop_image->page.x=bounding_box.x;
crop_image->page.y=bounding_box.y;
/*
Crop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,crop_image,1,1)
#endif
for (y=0; y < (ssize_t) crop_image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict crop_indexes;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
1,exception);
q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
(void) CopyMagickMemory(q,p,(size_t) crop_image->columns*sizeof(*p));
if ((indexes != (IndexPacket *) NULL) &&
(crop_indexes != (IndexPacket *) NULL))
(void) CopyMagickMemory(crop_indexes,indexes,(size_t) crop_image->columns*
sizeof(*crop_indexes));
if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CropImage)
#endif
proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
crop_view=DestroyCacheView(crop_view);
image_view=DestroyCacheView(image_view);
crop_image->type=image->type;
if (status == MagickFalse)
crop_image=DestroyImage(crop_image);
return(crop_image);
}
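/*
  A minimal usage sketch (assuming `image' and `exception' are valid):
  extract the 64x64 region whose top-left corner is at (10,20).  The
  result keeps a virtual canvas (page) offset, so reset crop_image->page
  afterwards if an unoffset image is wanted.

    RectangleInfo geometry;
    Image *crop_image;

    geometry.width=64;
    geometry.height=64;
    geometry.x=10;
    geometry.y=20;
    crop_image=CropImage(image,&geometry,exception);
    if (crop_image != (Image *) NULL)
      crop_image=DestroyImage(crop_image);
*/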
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image into a list of tiles.  The result
% may be a single sub-region of the image.  This applies all the normal
% geometry flags for Crop.
%
% The format of the CropImageToTiles method is:
%
% Image *CropImageToTiles(const Image *image,
% const char *crop_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
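/*
  Worked examples: MagickRound(2.4) == 2.0 and MagickRound(2.6) == 3.0;
  exact halves round toward ceil(), so MagickRound(2.5) == 3.0 and
  MagickRound(-2.5) == -2.0, where C99 round() would return -3.0.
*/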
MagickExport Image *CropImageToTiles(const Image *image,
const char *crop_geometry,ExceptionInfo *exception)
{
Image
*next,
*crop_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
crop_image=NewImageList();
next=NewImageList();
flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
if ((flags & AreaValue) != 0)
{
PointInfo
delta,
offset;
RectangleInfo
crop;
size_t
height,
width;
/*
Crop into NxM tiles (@ flag).
*/
width=image->columns;
height=image->rows;
if (geometry.width == 0)
geometry.width=1;
if (geometry.height == 0)
geometry.height=1;
if ((flags & AspectValue) == 0)
{
width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
else
{
width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
delta.x=(double) width/geometry.width;
delta.y=(double) height/geometry.height;
if (delta.x < 1.0)
delta.x=1.0;
if (delta.y < 1.0)
delta.y=1.0;
for (offset.y=0; offset.y < (double) height; )
{
if ((flags & AspectValue) == 0)
{
crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
(geometry.y > 0 ? 0 : geometry.y)));
offset.y+=delta.y; /* increment now to find height */
crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
(geometry.y < 0 ? 0 : geometry.y)));
}
else
{
crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
(geometry.y > 0 ? geometry.y : 0)));
offset.y+=delta.y; /* increment now to find height */
crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
(geometry.y < 0 ? geometry.y : 0)));
}
crop.height-=crop.y;
crop.y+=image->page.y;
for (offset.x=0; offset.x < (double) width; )
{
if ((flags & AspectValue) == 0)
{
crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
(geometry.x > 0 ? 0 : geometry.x)));
offset.x+=delta.x; /* increment now to find width */
crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
(geometry.x < 0 ? 0 : geometry.x)));
}
else
{
crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
(geometry.x > 0 ? geometry.x : 0)));
offset.x+=delta.x; /* increment now to find width */
crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
(geometry.x < 0 ? geometry.x : 0)));
}
crop.width-=crop.x;
crop.x+=image->page.x;
next=CropImage(image,&crop,exception);
if (next == (Image *) NULL)
break;
AppendImageToList(&crop_image,next);
}
if (next == (Image *) NULL)
break;
}
ClearMagickException(exception);
return(crop_image);
}
if (((geometry.width == 0) && (geometry.height == 0)) ||
((flags & XValue) != 0) || ((flags & YValue) != 0))
{
/*
Crop a single region at +X+Y.
*/
crop_image=CropImage(image,&geometry,exception);
if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
{
crop_image->page.width=geometry.width;
crop_image->page.height=geometry.height;
crop_image->page.x-=geometry.x;
crop_image->page.y-=geometry.y;
}
return(crop_image);
}
if ((image->columns > geometry.width) || (image->rows > geometry.height))
{
RectangleInfo
page;
size_t
height,
width;
ssize_t
x,
y;
/*
Crop into tiles of fixed size WxH.
*/
page=image->page;
if (page.width == 0)
page.width=image->columns;
if (page.height == 0)
page.height=image->rows;
width=geometry.width;
if (width == 0)
width=page.width;
height=geometry.height;
if (height == 0)
height=page.height;
next=NewImageList();
for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
{
for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
{
geometry.width=width;
geometry.height=height;
geometry.x=x;
geometry.y=y;
next=CropImage(image,&geometry,exception);
if (next == (Image *) NULL)
break;
AppendImageToList(&crop_image,next);
}
if (next == (Image *) NULL)
break;
}
return(crop_image);
}
return(CloneImage(image,0,0,MagickTrue,exception));
}
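/*
  A minimal usage sketch (assuming `image' and `exception' are valid):
  cut the image into fixed 100x100 tiles.  The result is an image list,
  so release it with DestroyImageList() rather than DestroyImage():

    Image *tiles;

    tiles=CropImageToTiles(image,"100x100",exception);
    if (tiles != (Image *) NULL)
      tiles=DestroyImageList(tiles);
*/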
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to excerpt with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"
CacheView
*excerpt_view,
*image_view;
Image
*excerpt_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate excerpt image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (excerpt_image == (Image *) NULL)
return((Image *) NULL);
/*
Excerpt each row.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
for (y=0; y < (ssize_t) excerpt_image->rows; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict excerpt_indexes,
*restrict indexes;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
geometry->width,1,exception);
q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) CopyMagickMemory(q,p,(size_t) excerpt_image->columns*sizeof(*q));
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
if (excerpt_indexes != (IndexPacket *) NULL)
(void) CopyMagickMemory(excerpt_indexes,indexes,(size_t)
excerpt_image->columns*sizeof(*excerpt_indexes));
}
if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ExcerptImage)
#endif
proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
excerpt_view=DestroyCacheView(excerpt_view);
image_view=DestroyCacheView(image_view);
excerpt_image->type=image->type;
if (status == MagickFalse)
excerpt_image=DestroyImage(excerpt_image);
return(excerpt_image);
}
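/*
  A minimal usage sketch (assuming `image' and `exception' are valid):
  ExcerptImage() is a stripped-down CropImage(); the geometry is used
  exactly as given, with no virtual canvas or page adjustments.

    RectangleInfo geometry;
    Image *excerpt_image;

    geometry.width=32;
    geometry.height=32;
    geometry.x=8;
    geometry.y=8;
    excerpt_image=ExcerptImage(image,&geometry,exception);
    if (excerpt_image != (Image *) NULL)
      excerpt_image=DestroyImage(excerpt_image);
*/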
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
Image
*extent_image;
/*
Allocate extent image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (extent_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageBackgroundColor(extent_image);
(void) CompositeImage(extent_image,image->compose,image,-geometry->x,
-geometry->y);
return(extent_image);
}
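/*
  A minimal usage sketch (assuming `image' and `exception' are valid):
  pad the image onto a 200x200 canvas of the background color.  Because
  the composite above negates the offsets, geometry.x=(-10) places the
  original 10 pixels in from the left edge.

    RectangleInfo geometry;
    Image *extent_image;

    geometry.width=200;
    geometry.height=200;
    geometry.x=(-10);
    geometry.y=(-10);
    extent_image=ExtentImage(image,&geometry,exception);
    if (extent_image != (Image *) NULL)
      extent_image=DestroyImage(extent_image);
*/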
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"
CacheView
*flip_view,
*image_view;
Image
*flip_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
flip_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (flip_image == (Image *) NULL)
return((Image *) NULL);
/*
Flip image.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,flip_image,1,1)
#endif
for (y=0; y < (ssize_t) flip_image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict flip_indexes;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
1),flip_image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
indexes=GetCacheViewVirtualIndexQueue(image_view);
if (indexes != (const IndexPacket *) NULL)
{
flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
if (flip_indexes != (IndexPacket *) NULL)
(void) CopyMagickMemory(flip_indexes,indexes,(size_t) image->columns*
sizeof(*flip_indexes));
}
if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FlipImage)
#endif
proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flip_view=DestroyCacheView(flip_view);
image_view=DestroyCacheView(image_view);
flip_image->type=image->type;
if (page.height != 0)
page.y=(ssize_t) (page.height-flip_image->rows-page.y);
flip_image->page=page;
if (status == MagickFalse)
flip_image=DestroyImage(flip_image);
return(flip_image);
}
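/*
  A minimal usage sketch (assuming `image' and `exception' are valid);
  note that the page y offset is mirrored too, so flipping twice restores
  the original virtual canvas placement:

    Image *flip_image;

    flip_image=FlipImage(image,exception);
    if (flip_image != (Image *) NULL)
      flip_image=DestroyImage(flip_image);
*/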
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"
CacheView
*flop_view,
*image_view;
Image
*flop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (flop_image == (Image *) NULL)
return((Image *) NULL);
/*
Flop each row.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,flop_image,1,1)
#endif
for (y=0; y < (ssize_t) flop_image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict flop_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
q+=flop_image->columns;
indexes=GetCacheViewVirtualIndexQueue(image_view);
flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
for (x=0; x < (ssize_t) flop_image->columns; x++)
{
(*--q)=(*p++);
if ((indexes != (const IndexPacket *) NULL) &&
(flop_indexes != (IndexPacket *) NULL))
SetPixelIndex(flop_indexes+flop_image->columns-x-1,
GetPixelIndex(indexes+x));
}
if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FlopImage)
#endif
proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flop_view=DestroyCacheView(flop_view);
image_view=DestroyCacheView(image_view);
flop_image->type=image->type;
if (page.width != 0)
page.x=(ssize_t) (page.width-flop_image->columns-page.x);
flop_image->page=page;
if (status == MagickFalse)
flop_image=DestroyImage(flop_image);
return(flop_image);
}
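/*
  A minimal usage sketch (assuming `image' and `exception' are valid);
  as with FlipImage(), the page x offset is mirrored so the virtual
  canvas placement stays consistent:

    Image *flop_image;

    flop_image=FlopImage(image,exception);
    if (flop_image != (Image *) NULL)
      flop_image=DestroyImage(flop_image);
*/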
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyImageRegion(Image *destination,
const Image *source,const size_t columns,const size_t rows,
const ssize_t sx,const ssize_t sy,const ssize_t dx,const ssize_t dy,
ExceptionInfo *exception)
{
CacheView
*source_view,
*destination_view;
MagickBooleanType
status;
ssize_t
y;
if (columns == 0)
return(MagickTrue);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source,exception);
destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(source,destination,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict destination_indexes;
register PixelPacket
*restrict q;
/*
Transfer scanline.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source_view);
(void) CopyMagickMemory(q,p,(size_t) columns*sizeof(*p));
if (indexes != (IndexPacket *) NULL)
{
destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
if (destination_indexes != (IndexPacket *) NULL)
(void) CopyMagickMemory(destination_indexes,indexes,(size_t)
columns*sizeof(*indexes));
}
sync=SyncCacheViewAuthenticPixels(destination_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
destination_view=DestroyCacheView(destination_view);
source_view=DestroyCacheView(source_view);
return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"
Image
*roll_image;
MagickStatusType
status;
RectangleInfo
offset;
/*
Initialize roll image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
roll_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if (roll_image == (Image *) NULL)
return((Image *) NULL);
offset.x=x_offset;
offset.y=y_offset;
while (offset.x < 0)
offset.x+=(ssize_t) image->columns;
while (offset.x >= (ssize_t) image->columns)
offset.x-=(ssize_t) image->columns;
while (offset.y < 0)
offset.y+=(ssize_t) image->rows;
while (offset.y >= (ssize_t) image->rows)
offset.y-=(ssize_t) image->rows;
/*
Roll image.
*/
status=CopyImageRegion(roll_image,image,(size_t) offset.x,
(size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
offset.y,0,0,exception);
(void) SetImageProgress(image,RollImageTag,0,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
(size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
exception);
(void) SetImageProgress(image,RollImageTag,1,3);
status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,2,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
offset.y,0,0,offset.x,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,3,3);
roll_image->type=image->type;
if (status == MagickFalse)
roll_image=DestroyImage(roll_image);
return(roll_image);
}
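/*
  A minimal usage sketch (assuming `image' and `exception' are valid):
  roll the image 10 columns to the right and 5 rows down; pixels that
  fall off one edge wrap around to the opposite edge, which is why the
  body above stitches the result from four CopyImageRegion() calls.

    Image *roll_image;

    roll_image=RollImage(image,10,5,exception);
    if (roll_image != (Image *) NULL)
      roll_image=DestroyImage(roll_image);
*/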
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
const RectangleInfo *shave_info,ExceptionInfo *exception)
{
Image
*shave_image;
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (((2*shave_info->width) >= image->columns) ||
((2*shave_info->height) >= image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
SetGeometry(image,&geometry);
geometry.width-=2*shave_info->width;
geometry.height-=2*shave_info->height;
geometry.x=(ssize_t) shave_info->width+image->page.x;
geometry.y=(ssize_t) shave_info->height+image->page.y;
shave_image=CropImage(image,&geometry,exception);
if (shave_image == (Image *) NULL)
return((Image *) NULL);
shave_image->page.width-=2*shave_info->width;
shave_image->page.height-=2*shave_info->height;
shave_image->page.x-=(ssize_t) shave_info->width;
shave_image->page.y-=(ssize_t) shave_info->height;
return(shave_image);
}
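/*
  A minimal usage sketch (assuming `image' and `exception' are valid):
  shave 10 pixels from the left and right edges and 5 pixels from the top
  and bottom edges; only the width and height members of shave_info are
  used.

    RectangleInfo shave_info;
    Image *shave_image;

    shave_info.width=10;
    shave_info.height=5;
    shave_info.x=0;
    shave_info.y=0;
    shave_image=ShaveImage(image,&shave_info,exception);
    if (shave_image != (Image *) NULL)
      shave_image=DestroyImage(shave_image);
*/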
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
CacheView
*image_view,
*splice_view;
Image
*splice_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
splice_geometry;
ssize_t
y;
/*
Allocate splice image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
splice_geometry=(*geometry);
splice_image=CloneImage(image,image->columns+splice_geometry.width,
image->rows+splice_geometry.height,MagickTrue,exception);
if (splice_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse)
{
InheritException(exception,&splice_image->exception);
splice_image=DestroyImage(splice_image);
return((Image *) NULL);
}
(void) SetImageBackgroundColor(splice_image);
/*
Respect image geometry.
*/
switch (image->gravity)
{
default:
case UndefinedGravity:
case NorthWestGravity:
break;
case NorthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
break;
}
case NorthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
break;
}
case WestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case StaticGravity:
case CenterGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case EastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case SouthWestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
}
/*
Splice image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,splice_image,1,1)
#endif
for (y=0; y < (ssize_t) splice_geometry.y; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes,
*restrict splice_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
for (x=0; x < splice_geometry.x; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q++;
for ( ; x < (ssize_t) splice_image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SpliceImage)
#endif
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,splice_image,1,1)
#endif
for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
y < (ssize_t) splice_image->rows; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes,
*restrict splice_indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
image->columns,1,exception);
if ((y < 0) || (y >= (ssize_t) splice_image->rows))
continue;
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
for (x=0; x < splice_geometry.x; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q++;
for ( ; x < (ssize_t) splice_image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SpliceImage)
#endif
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
splice_view=DestroyCacheView(splice_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
splice_image=DestroyImage(splice_image);
return(splice_image);
}
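/*
  A minimal usage sketch (assuming `image' and `exception' are valid):
  splice a 10-column band of the background color at x=50 and a 20-row
  band at y=0; the result is 10 columns wider and 20 rows taller than the
  input, with the offsets shifted by the image gravity as above.

    RectangleInfo geometry;
    Image *splice_image;

    geometry.width=10;
    geometry.height=20;
    geometry.x=50;
    geometry.y=0;
    splice_image=SpliceImage(image,&geometry,exception);
    if (splice_image != (Image *) NULL)
      splice_image=DestroyImage(splice_image);
*/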
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry)
%
% A description of each parameter follows:
%
% o image: the image. The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
/*
DANGER: This function destroys what it assumes to be a single image list.
If the input image is part of a larger list, all other images in that list
will be simply 'lost', not destroyed.
Also, if the crop generates a list of images, only the first image is
resized.  And finally, if the crop succeeds but the resize fails, you will
get a cropped image as well as a 'false' or 'failed' report.
This function should probably be deprecated in favor of direct calls to
CropImageToTiles() or ResizeImage(), as appropriate.
*/
MagickExport MagickBooleanType TransformImage(Image **image,
const char *crop_geometry,const char *image_geometry)
{
Image
*resize_image,
*transform_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image **) NULL);
assert((*image)->signature == MagickSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
transform_image=(*image);
if (crop_geometry != (const char *) NULL)
{
Image
*crop_image;
/*
Crop image to a user specified size.
*/
crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception);
if (crop_image == (Image *) NULL)
transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
else
{
transform_image=DestroyImage(transform_image);
transform_image=GetFirstImageInList(crop_image);
}
*image=transform_image;
}
if (image_geometry == (const char *) NULL)
return(MagickTrue);
/*
Scale image to a user specified size.
*/
flags=ParseRegionGeometry(transform_image,image_geometry,&geometry,
&(*image)->exception);
(void) flags;
if ((transform_image->columns == geometry.width) &&
(transform_image->rows == geometry.height))
return(MagickTrue);
resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
transform_image->filter,transform_image->blur,&(*image)->exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
transform_image=DestroyImage(transform_image);
transform_image=resize_image;
*image=transform_image;
return(MagickTrue);
}
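/*
  A minimal usage sketch (assuming `image' is a single-image list):
  crop to a 100x100 region at (10,10), then resize the result to 50x50.
  On failure the handle still points at a valid image, and any error
  detail is left in image->exception.

    MagickBooleanType status;

    status=TransformImage(&image,"100x100+10+10","50x50");
    if (status == MagickFalse)
      CatchImageException(image);
*/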
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImages() calls TransformImage() on each image of a sequence.
%
% The format of the TransformImages method is:
%
% MagickBooleanType TransformImages(Image **images,
% const char *crop_geometry,const char *image_geometry)
%
% A description of each parameter follows:
%
% o images: the image sequence. The transformed images are returned as this
% parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
MagickExport MagickBooleanType TransformImages(Image **images,
const char *crop_geometry,const char *image_geometry)
{
Image
*image,
**image_list,
*transform_images;
MagickStatusType
status;
register ssize_t
i;
assert(images != (Image **) NULL);
assert((*images)->signature == MagickSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
image_list=ImageListToArray(*images,&(*images)->exception);
if (image_list == (Image **) NULL)
return(MagickFalse);
status=MagickTrue;
transform_images=NewImageList();
for (i=0; image_list[i] != (Image *) NULL; i++)
{
image=image_list[i];
status&=TransformImage(&image,crop_geometry,image_geometry);
AppendImageToList(&transform_images,image);
}
*images=transform_images;
image_list=(Image **) RelinquishMagickMemory(image_list);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
CacheView
*image_view,
*transpose_view;
Image
*transpose_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transpose_image == (Image *) NULL)
return((Image *) NULL);
/*
Transpose image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,transpose_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register IndexPacket
*restrict transpose_indexes,
*restrict indexes;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
0,1,transpose_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) CopyMagickMemory(q,p,(size_t) image->columns*sizeof(*q));
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
if (transpose_indexes != (IndexPacket *) NULL)
(void) CopyMagickMemory(transpose_indexes,indexes,(size_t)
image->columns*sizeof(*transpose_indexes));
}
if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransposeImage)
#endif
proceed=SetImageProgress(image,TransposeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transpose_view=DestroyCacheView(transpose_view);
image_view=DestroyCacheView(image_view);
transpose_image->type=image->type;
page=transpose_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
transpose_image->page=page;
if (status == MagickFalse)
transpose_image=DestroyImage(transpose_image);
return(transpose_image);
}
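/*
  A minimal usage sketch (assuming `image' and `exception' are valid):
  rows become columns (a reflection across the top-left to bottom-right
  diagonal), so a WxH input yields an HxW result and the page members are
  swapped accordingly.

    Image *transpose_image;

    transpose_image=TransposeImage(image,exception);
    if (transpose_image != (Image *) NULL)
      transpose_image=DestroyImage(transpose_image);
*/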
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
CacheView
*image_view,
*transverse_view;
Image
*transverse_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transverse_image == (Image *) NULL)
return((Image *) NULL);
/*
Transverse image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,transverse_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict transverse_indexes,
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
1),0,1,transverse_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
q+=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
*--q=(*p++);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
if (transverse_indexes != (IndexPacket *) NULL)
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(transverse_indexes+image->columns-x-1,
GetPixelIndex(indexes+x));
}
sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransverseImage)
#endif
proceed=SetImageProgress(image,TransverseImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transverse_view=DestroyCacheView(transverse_view);
image_view=DestroyCacheView(image_view);
transverse_image->type=image->type;
page=transverse_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
transverse_image->page=page;
if (status == MagickFalse)
transverse_image=DestroyImage(transverse_image);
return(transverse_image);
}
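/*
  A minimal usage sketch (assuming `image' and `exception' are valid):
  the transverse is the reflection across the other (bottom-left to
  top-right) diagonal, so a WxH input again yields an HxW result, with
  both page offsets mirrored.

    Image *transverse_image;

    transverse_image=TransverseImage(image,exception);
    if (transverse_image != (Image *) NULL)
      transverse_image=DestroyImage(transverse_image);
*/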
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
geometry=GetImageBoundingBox(image,exception);
if ((geometry.width == 0) || (geometry.height == 0))
{
Image
*crop_image;
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(crop_image);
crop_image->page=image->page;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
return(crop_image);
}
geometry.x+=image->page.x;
geometry.y+=image->page.y;
return(CropImage(image,&geometry,exception));
}
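/*
  A minimal usage sketch (assuming `image' and `exception' are valid):
  trim the edge rows and columns that match the border color, as computed
  by GetImageBoundingBox(); if everything trims away, a 1x1 transparent
  stand-in is returned instead of a NULL image.

    Image *trim_image;

    trim_image=TrimImage(image,exception);
    if (trim_image != (Image *) NULL)
      trim_image=DestroyImage(trim_image);
*/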
|
omp_sections_reduction.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int test_omp_sections_reduction()
{
int sum;
int known_sum;
double dpt,dsum;
double dknown_sum;
double dt=0.5; /* ratio of the geometric series for the + and - tests */
double rounding_error= 1.E-9;
int diff;
double ddiff;
int product;
int known_product;
int logic_and;
int bit_and;
int logic_or;
int bit_or;
int exclusiv_bit_or;
int logics[1000];
int i;
int result;
/* int my_islarger; */
/*int is_larger=1;*/
sum =7;
dpt =1;
dsum=0;
product =1;
logic_and=1;
bit_and=1;
logic_or=0;
bit_or=0;
exclusiv_bit_or=0;
result = 0;
dt = 1./3.;
known_sum = (999*1000)/2+7;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(+:sum)
{
#pragma omp section
{
for (i=1;i<300;i++) {
sum=sum+i;
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
sum=sum+i;
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
sum=sum+i;
}
}
}
}
if(known_sum!=sum) {
++result;
fprintf(stderr,"Error in sum with integers: Result was %d"
" instead of %d\n", sum,known_sum);
}
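/* The three explicit sections above are one way to split the work; a
   minimal equivalent (assuming the same OpenMP runtime) uses a single
   worksharing loop, and the reduction(+:sum) clause combines the private
   partial sums identically:

     sum = 7;
     #pragma omp parallel for reduction(+:sum)
     for (i = 1; i < 1000; i++) {
       sum = sum + i;
     }
*/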
diff = (999*1000)/2;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(-:diff)
{
#pragma omp section
{
for (i=1;i<300;i++) {
diff=diff-i;
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
diff=diff-i;
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
diff=diff-i;
}
}
}
}
if(diff != 0) {
result++;
fprintf(stderr,"Error in Difference with integers: Result was %d"
" instead of 0.\n",diff);
}
for (i=0;i<20;++i) {
dpt*=dt;
}
dknown_sum = (1-dpt)/(1-dt);
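/* dpt now holds dt^20, so this is the closed form of the finite geometric
   series: (1 - dt^20)/(1 - dt) = dt^0 + dt^1 + ... + dt^19, which the three
   sections below accumulate into dsum. */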
#pragma omp parallel
{
#pragma omp sections private(i) reduction(+:dsum)
{
#pragma omp section
{
for (i=0;i<6;++i) {
dsum += pow(dt,i);
}
}
#pragma omp section
{
for (i=6;i<12;++i) {
dsum += pow(dt,i);
}
}
#pragma omp section
{
for (i=12;i<20;++i) {
dsum += pow(dt,i);
}
}
}
}
if( fabs(dsum-dknown_sum) > rounding_error ) {
result++;
fprintf(stderr,"Error in sum with doubles: Result was %f"
" instead of %f (Difference: %E)\n",
dsum, dknown_sum, dsum-dknown_sum);
}
dpt=1;
for (i=0;i<20;++i) {
dpt*=dt;
}
fprintf(stderr,"\n");
ddiff = (1-dpt)/(1-dt);
#pragma omp parallel
{
#pragma omp sections private(i) reduction(-:ddiff)
{
#pragma omp section
{
for (i=0;i<6;++i) {
ddiff -= pow(dt,i);
}
}
#pragma omp section
{
for (i=6;i<12;++i) {
ddiff -= pow(dt,i);
}
}
#pragma omp section
{
for (i=12;i<20;++i) {
ddiff -= pow(dt,i);
}
}
}
}
if(fabs(ddiff) > rounding_error) {
result++;
fprintf(stderr,"Error in Difference with doubles: Result was %E"
" instead of 0.0\n",ddiff);
}
known_product = 3628800;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(*:product)
{
#pragma omp section
{
for(i=1;i<3;i++) {
product *= i;
}
}
#pragma omp section
{
for(i=3;i<7;i++) {
product *= i;
}
}
#pragma omp section
{
for(i=7;i<11;i++) {
product *= i;
}
}
}
}
if(known_product != product) {
result++;
fprintf(stderr,"Error in Product with integers: Result was %d"
" instead of %d\n",product,known_product);
}
for(i=0;i<1000;i++) {
logics[i]=1;
}
#pragma omp parallel
{
#pragma omp sections private(i) reduction(&&:logic_and)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_and = (logic_and && logics[i]);
}
}
}
}
if(!logic_and) {
result++;
fprintf(stderr,"Error in logic AND part 1\n");
}
logic_and = 1;
logics[501] = 0;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(&&:logic_and)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_and = (logic_and && logics[i]);
}
}
}
}
if(logic_and) {
result++;
fprintf(stderr,"Error in logic AND part 2\n");
}
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel
{
#pragma omp sections private(i) reduction(||:logic_or)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_or = (logic_or || logics[i]);
}
}
}
}
if(logic_or) {
result++;
fprintf(stderr,"\nError in logic OR part 1\n");
}
logic_or = 0;
logics[501]=1;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(||:logic_or)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_or = (logic_or || logics[i]);
}
}
}
}
if(!logic_or) {
result++;
fprintf(stderr,"Error in logic OR part 2\n");
}
for(i=0;i<1000;++i) {
logics[i]=1;
}
#pragma omp parallel
{
#pragma omp sections private(i) reduction(&:bit_and)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_and = (bit_and & logics[i]);
}
}
}
}
if(!bit_and) {
result++;
fprintf(stderr,"Error in BIT AND part 1\n");
}
bit_and = 1;
logics[501]=0;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(&:bit_and)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_and = bit_and & logics[i];
}
}
}
}
if(bit_and) {
result++;
fprintf(stderr,"Error in BIT AND part 2\n");
}
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel
{
#pragma omp sections private(i) reduction(|:bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_or = bit_or | logics[i];
}
}
}
}
if(bit_or) {
result++;
fprintf(stderr,"Error in BIT OR part 1\n");
}
bit_or = 0;
logics[501]=1;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(|:bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_or = bit_or | logics[i];
}
}
}
}
if(!bit_or) {
result++;
fprintf(stderr,"Error in BIT OR part 2\n");
}
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel
{
#pragma omp sections private(i) reduction(^:exclusiv_bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
}
if(exclusiv_bit_or) {
result++;
fprintf(stderr,"Error in EXCLUSIV BIT OR part 1\n");
}
exclusiv_bit_or = 0;
logics[501]=1;
#pragma omp parallel
{
#pragma omp sections private(i) reduction(^:exclusiv_bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
}
if(!exclusiv_bit_or) {
result++;
fprintf(stderr,"Error in EXCLUSIV BIT OR part 2\n");
}
/*printf("\nResult:%d\n",result);*/
return (result==0);
}
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_sections_reduction()) {
num_failed++;
}
}
return num_failed;
}
|
ast-dump-openmp-sections.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_zero() {
#pragma omp sections
{}
}
void test_one() {
#pragma omp sections
{ ; }
}
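// FileCheck runs with -implicit-check-not=openmp_structured_block, so that
// marker may only appear where a CHECK line explicitly expects it (the
// CompoundStmt inside the CapturedStmt below).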
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-sections.c:3:1, line:6:1> line:3:6 test_zero 'void ()'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:18, line:6:1>
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:8:1, line:11:1> line:8:6 test_one 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:11:1>
// CHECK-NEXT: `-OMPSectionsDirective {{.*}} <line:9:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:10:3, col:7>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-CompoundStmt {{.*}} <col:3, col:7> openmp_structured_block
// CHECK-NEXT: | `-NullStmt {{.*}} <col:5>
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:9:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-sections.c:9:1) *const restrict'
|
2d.par.c |
/*@ begin PerfTuning (
def build
{
arg command = 'icc';
arg options = '-fast -openmp -I/usr/local/icc/include -lm';
}
def performance_counter
{
arg method = 'basic timer';
arg repetitions = 2;
}
def performance_params
{
param T1[] = [1];
param T2[] = [1];
param PERMUTS[] = [
# (['ii'],['jj'],'c5','c6'),
# (['jj'],['ii'],'c5','c6'),
# (['ii'],['jj'],'c6','c5'),
(['jj'],['ii'],'c6','c5'),
];
param U1[] = [1];
param U2[] = [4];
param VEC[] = [False];
}
def search
{
# arg algorithm = 'Simplex';
arg algorithm = 'Exhaustive';
# arg time_limit = 5;
# arg total_runs = 1;
}
def input_params
{
param TVAL = 500;
param NXVAL = 4000;
param NYVAL = 4000;
decl int tmax = TVAL;
decl int nx = NXVAL;
decl int ny = NYVAL;
decl double ex[nx][ny+1] = random;
decl double ey[nx+1][ny] = random;
decl double hz[nx][ny] = random;
}
) @*/
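/* The annotation block above drives an empirical autotuner (Orio-style
   syntax): it builds the FDTD kernel below with icc, sweeps the tiling (T1,
   T2), permutation, and unroll-jam (U1, U2) parameters, and times each
   variant.  The generated kernel assumes floord/ceild/max/min helper macros
   from the code generator. */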
int t, i, j, k, l, m, n, ii, jj;
#define S1(zT0,zT1,t,j) {ey[0][j]=t;}
#define S2(zT0,zT1,zT2,t,i,j) {ey[i][j]=ey[i][j]-((double)(1))/2*(hz[i][j]-hz[i-1][j]);}
#define S3(zT0,zT1,zT2,t,i,j) {ex[i][j]=ex[i][j]-((double)(1))/2*(hz[i][j]-hz[i][j-1]);}
#define S4(zT0,zT1,zT2,t,i,j) {hz[i][j]=hz[i][j]-((double)(7))/10*(ey[1+i][j]+ex[i][1+j]-ex[i][j]-ey[i][j]);}
int c1, c2, c3, c4, c5, c6, c7;
register int lb, ub, lb1, ub1, lb2, ub2;
register int lbv, ubv;
for (c1=-1;c1<=floord(2*tmax+ny-2,32);c1++) {
lb1=max(max(ceild(32*c1-tmax+1,32),ceild(32*c1-31,64)),0);
ub1=min(min(floord(32*c1+ny+31,64),floord(tmax+ny-1,32)),floord(32*c1+31,32));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7)
for (c2=lb1; c2<=ub1; c2++) {
for (c3=max(max(max(max(ceild(32*c2-ny-30,32),0),ceild(64*c1-96*c2-61,32)),ceild(32*c1-32*c2-31,32)),ceild(32*c1-1024*c2-1891,992));c3<=min(min(floord(32*c2+nx+30,32),floord(tmax+nx-1,32)),floord(32*c1-32*c2+nx+31,32));c3++) {
if ((c1 <= floord(32*c2+32*c3-nx,32)) && (c2 <= floord(32*c3-nx+ny,32)) && (c3 >= ceild(nx,32))) {
for (c5=max(32*c2,32*c3-nx+1);c5<=min(32*c3-nx+ny,32*c2+31);c5++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,32*c3-nx,nx-1,-32*c3+c5+nx-1) ;
}
}
if ((c1 <= floord(64*c2-ny,32)) && (c2 >= max(ceild(32*c3-nx+ny+1,32),ceild(ny,32)))) {
for (c6=max(32*c3,32*c2-ny+1);c6<=min(32*c2+nx-ny,32*c3+31);c6++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,32*c2-ny,-32*c2+c6+ny-1,ny-1) ;
}
}
if (c1 == c2+c3) {
for (c4=max(max(32*c2-ny+1,0),32*c3);c4<=min(min(32*c3+30,32*c2-ny+31),tmax-1);c4++) {
for (c5=32*c2;c5<=c4+ny-1;c5++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5) ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
S2(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,0,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
}
for (c6=c4+1;c6<=32*c3+31;c6++) {
S4(c1-c2,0,-c1+2*c2,c4,-c4+c6-1,ny-1) ;
}
}
}
if (c1 == c2+c3) {
for (c4=max(max(32*c3,0),32*c2-ny+32);c4<=min(min(32*c3+30,tmax-1),32*c2-1);c4++) {
for (c5=32*c2;c5<=32*c2+31;c5++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5) ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
S2(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,0,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
}
}
}
if (c1 == c2+c3) {
for (c4=max(max(32*c2,32*c3),0);c4<=min(min(32*c2+30,32*c3+30),tmax-1);c4++) {
S1(c1-c2,-c1+2*c2,c4,0) ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
S2(c1-c2,0,-c1+2*c2,c4,-c4+c6,0) ;
}
for (c5=c4+1;c5<=32*c2+31;c5++) {
S1(c1-c2,-c1+2*c2,c4,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,0,-c4+c5) ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
S2(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,0,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,0,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
}
}
}
for (c4=max(max(max(32*c1-32*c2,0),32*c2-ny+1),32*c3-nx+1);c4<=min(min(min(32*c3-nx+31,32*c1-32*c2+31),tmax-1),32*c2-ny+31);c4++) {
for (c5=32*c2;c5<=c4+ny-1;c5++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,nx-1,-c4+c5-1) ;
}
for (c6=32*c3;c6<=c4+nx;c6++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,ny-1) ;
}
}
for (c4=max(max(max(0,32*c3-nx+1),32*c1-32*c2),32*c2-ny+32);c4<=min(min(min(tmax-1,32*c1-32*c2+31),32*c2-1),32*c3-nx+31);c4++) {
for (c5=32*c2;c5<=32*c2+31;c5++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,nx-1,-c4+c5-1) ;
}
}
for (c4=max(max(max(32*c3-nx+32,32*c1-32*c2),0),32*c2-ny+1);c4<=min(min(min(32*c3-1,32*c1-32*c2+31),tmax-1),32*c2-ny+31);c4++) {
for (c5=32*c2;c5<=c4+ny-1;c5++) {
for (c6=32*c3;c6<=32*c3+31;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
}
for (c6=32*c3;c6<=32*c3+31;c6++) {
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,ny-1) ;
}
}
for (c4=max(max(max(32*c2,0),32*c3-nx+1),32*c1-32*c2);c4<=min(min(min(tmax-1,32*c1-32*c2+31),32*c2+30),32*c3-nx+31);c4++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,0) ;
}
for (c5=c4+1;c5<=32*c2+31;c5++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,nx-1,-c4+c5-1) ;
}
}
for (c4=max(max(max(0,32*c1-32*c2),32*c3-nx+32),32*c2-ny+32);c4<=min(min(min(32*c3-1,tmax-1),32*c1-32*c2+31),32*c2-1);c4++) {
/*@ begin Loop(
transform Composite(
tile = [('c5',T1,'ii'),('c6',T2,'jj')],
permut = [PERMUTS],
unrolljam = [('c5',U1),('c6',U2)],
vector = (VEC, ['ivdep','vector always'])
)
for (c5=32*c2;c5<=32*c2+31;c5++)
for (c6=32*c3;c6<=32*c3+31;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
) @*/
for (c5=32*c2;c5<=32*c2+31;c5++)
for (c6=32*c3;c6<=32*c3+31;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
/*@ end @*/
}
for (c4=max(max(max(32*c2,32*c3-nx+32),0),32*c1-32*c2);c4<=min(min(min(32*c3-1,tmax-1),32*c1-32*c2+31),32*c2+30);c4++) {
for (c6=32*c3;c6<=32*c3+31;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,0) ;
}
for (c5=c4+1;c5<=32*c2+31;c5++) {
for (c6=32*c3;c6<=32*c3+31;c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S3(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6,-c4+c5) ;
S4(c1-c2,-c1+c2+c3,-c1+2*c2,c4,-c4+c6-1,-c4+c5-1) ;
}
}
}
if ((-c1 == -c2-c3) && (c1 <= min(floord(64*c3-1,32),floord(32*c3+tmax-32,32)))) {
S1(c3,c1-2*c3,32*c1-32*c3+31,0) ;
for (c6=32*c1-32*c3+32;c6<=32*c3+31;c6++) {
S2(c3,0,c1-2*c3,32*c1-32*c3+31,-32*c1+32*c3+c6-31,0) ;
}
}
if ((-c1 == -c2-c3) && (c1 >= ceild(64*c2-31,32)) && (c1 <= min(floord(32*c2+tmax-32,32),floord(64*c2-1,32)))) {
S1(c1-c2,-c1+2*c2,32*c1-32*c2+31,0) ;
for (c5=32*c1-32*c2+32;c5<=32*c2+31;c5++) {
S1(c1-c2,-c1+2*c2,32*c1-32*c2+31,-32*c1+32*c2+c5-31) ;
S3(c1-c2,0,-c1+2*c2,32*c1-32*c2+31,0,-32*c1+32*c2+c5-31) ;
}
}
if ((-c1 == -c2-c3) && (c1 <= min(floord(32*c2+tmax-32,32),2*c2-1))) {
for (c5=32*c2;c5<=min(32*c2+31,32*c1-32*c2+ny+30);c5++) {
S1(c1-c2,-c1+2*c2,32*c1-32*c2+31,-32*c1+32*c2+c5-31) ;
S3(c1-c2,0,-c1+2*c2,32*c1-32*c2+31,0,-32*c1+32*c2+c5-31) ;
}
}
if ((-c1 == -2*c2) && (-c1 == -2*c3) && (c1 <= floord(tmax-32,16))) {
if (c1%2 == 0) {
S1(c1/2,0,16*c1+31,0) ;
}
}
if ((c1 >= 2*c2) && (c2 <= min(c3-1,floord(tmax-32,32)))) {
for (c6=32*c3;c6<=min(32*c3+31,32*c2+nx+30);c6++) {
S2(c1-c2,-c1+c2+c3,-c1+2*c2,32*c2+31,-32*c2+c6-31,0) ;
}
}
}
}
}
/*@ end @*/
|
pacset_rf_regressor.h | #ifndef PACSET_RF_REG
#define PACSET_RF_REG
#include <vector>
#include <unordered_set>
#include <fstream>
#include "pacset_base_model.h"
#include "packer.h"
#include "config.h"
#include "json_reader.h"
#include "utils.h"
#include "node.h"
#include "MemoryMapped.h"
#define NUM_FILES 10
#define BLOCK_LOGGING 1
template <typename T, typename F>
class PacsetRandomForestRegressor: public PacsetBaseModel<T, F> {
public:
inline void setMembers(const std::vector<int> &bin_sizes,
const std::vector<int> &bin_node_sizes,
const std::vector<std::vector<int>> &bin_start){
PacsetBaseModel<T, F>::bin_sizes.clear();
std::copy(bin_sizes.begin(), bin_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_sizes));
std::copy(bin_node_sizes.begin(), bin_node_sizes.end(), back_inserter(PacsetBaseModel<T, F>::bin_node_sizes));
for (auto i: bin_start)
PacsetBaseModel<T, F>::bin_start.push_back(i);
}
inline void setBinNodeSizes(int pos, int siz){
PacsetBaseModel<T, F>::bin_node_sizes[pos] = siz;
}
inline void loadModel() {
JSONReader<T, F> J;
//J.convertSklToBins(PacsetBaseModel<T, F>::bins,
J.convertSklToBinsRapidJson(PacsetBaseModel<T, F>::bins,
PacsetBaseModel<T, F>::bin_sizes,
PacsetBaseModel<T, F>::bin_start,
PacsetBaseModel<T, F>::bin_node_sizes);
}
inline void pack(){
std::string layout = Config::getValue("layout");
auto bin = PacsetBaseModel<T, F>::bins[0];
int num_bins = std::stoi(Config::getValue("numthreads"));
for(int i=0; i<num_bins; ++i){
Packer<T, F> packer_obj(layout);
if(Config::getValue("intertwine") != std::string("notfound"))
packer_obj.setDepthIntertwined(std::atoi(Config::getValue("intertwine").c_str()));
//should pack in place
packer_obj.pack(PacsetBaseModel<T, F>::bins[i],
PacsetBaseModel<T, F>::bin_sizes[i],
PacsetBaseModel<T, F>::bin_start[i]
);
setBinNodeSizes(i, PacsetBaseModel<T, F>::bins[i].size());
}
}
inline int mmapAndPredict(const std::vector<T>& observation, std::vector<double> &preds, int obsnum) {
int num_classes = std::stoi(Config::getValue("numclasses"));
int num_threads = std::stoi(Config::getValue("numthreads"));
int num_bins = PacsetBaseModel<T, F>::bin_sizes.size();
std::vector<double> elapsed_arr;
std::string modelfname = Config::getValue("modelfilename");
MemoryMapped mmapped_obj(modelfname.c_str(), 0);
Node<T, F> *data = (Node<T, F>*)mmapped_obj.getData();
std::unordered_set<int> blocks_accessed;
int next_node = 0;
int offset = 0;
double leaf_sum = 0;
std::vector<int> offsets;
int curr_offset = 0;
int total_num_trees = 0;
for (auto val: PacsetBaseModel<T, F>::bin_node_sizes){
offsets.push_back(curr_offset);
curr_offset += val;
}
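//offsets is an exclusive prefix sum of bin_node_sizes: offsets[b] is the
//index of bin b's first node in the flat mmapped node array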
//reduction avoids data races on leaf_sum and total_num_trees across bins
#pragma omp parallel for num_threads(num_threads) reduction(+:leaf_sum,total_num_trees)
for(int bin_counter=0; bin_counter<num_bins; ++bin_counter){
int block_number = 0;
Node<T, F> *bin = data + offsets[bin_counter];
std::vector<int> curr_node(PacsetBaseModel<T, F>::bin_sizes[bin_counter]);
std::vector<double> last_node(PacsetBaseModel<T, F>::bin_sizes[bin_counter]);
int i, feature_num=0, number_not_in_leaf=0;
T feature_val;
int siz = PacsetBaseModel<T, F>::bin_sizes[bin_counter];
total_num_trees += siz;
for(i=0; i<siz; ++i){
curr_node[i] = PacsetBaseModel<T, F>::bin_start[bin_counter][i];
//bin[curr_node[i]].printNode();
__builtin_prefetch(&bin[curr_node[i]], 0, 3);
#ifdef BLOCK_LOGGING
block_number = (curr_node[i] + offsets[bin_counter]) / BLOCK_SIZE;
#pragma omp critical
blocks_accessed.insert(block_number);
#endif
}
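//Traverse all trees of the bin in lockstep: each do-while pass advances
//every tree one level; a node with getLeft() == -1 is a leaf whose
//threshold field stores the prediction, and the loop ends once every
//tree has reached a leaf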
do{
number_not_in_leaf = 0;
for( i=0; i<siz; ++i){
if(curr_node[i] > 0){
feature_num = bin[curr_node[i]].getFeature();
feature_val = observation[feature_num];
if(bin[curr_node[i]].getLeft() == -1){
last_node[i] = bin[curr_node[i]].getThreshold();
curr_node[i] = -1;
}
else {
curr_node[i] = bin[curr_node[i]].nextNode(feature_val);
__builtin_prefetch(&bin[curr_node[i]], 0, 3);
++number_not_in_leaf;
}
}
}
}while(number_not_in_leaf);
double sum=0;
for(i=0; i<siz; ++i){
sum += last_node[i];
}
leaf_sum += sum;
}
preds.clear();
preds.push_back((double)leaf_sum);
preds.push_back((double)total_num_trees);
#ifdef BLOCK_LOGGING
return blocks_accessed.size();
#else
return 0;
#endif
}
inline void predict(const std::vector<std::vector<T>>& observation,
std::vector<int>& preds, std::vector<int>&results, bool mmap) {
}
inline void predict(const std::vector<std::vector<T>>& observation,
std::vector<double>& preds, std::vector<double>&results, bool mmap) {
//Predicts the class for a vector of observations
//By calling predict for a single observation and
//tallying the observations
//
int num_classes = std::stoi(Config::getValue("numclasses"));
int num_bins;
std::vector<double> elapsed_arr;
int blocks;
std::vector<int> num_blocks;
int ct=1;
for(auto single_obs : observation){
auto start = std::chrono::steady_clock::now();
//both paths currently use the mmap-based predictor; a separate in-memory
//path is not implemented, so the mmap flag is effectively ignored here
blocks = mmapAndPredict(single_obs, preds, ct);
num_blocks.push_back(blocks);
results.push_back((double)preds[0] / (double)preds[1] );
auto end = std::chrono::steady_clock::now();
ct+=1;
}
}
inline void serialize() {
auto bins = PacsetBaseModel<T, F>::bins;
int num_classes = std::stoi(Config::getValue("numclasses"));
int num_bins = bins.size();
std::vector<int> bin_sizes = PacsetBaseModel<T, F>::bin_sizes;
std::vector<int> bin_node_sizes = PacsetBaseModel<T, F>::bin_node_sizes;
std::vector<std::vector<int>> bin_start = PacsetBaseModel<T, F>::bin_start;
std::string format = Config::getValue("format");
//Write the metadata needed to reconstruct bins and for prediction
//TODO: change filename
std::string filename;
if(Config::getValue("metadatafilename") == std::string("notfound"))
filename = "metadata.txt";
else
filename = Config::getValue("metadatafilename");
std::fstream fout;
fout.open(filename, std::ios::out );
//Number of classes
fout<<num_classes<<"\n";
//Number of bins
fout<<num_bins<<"\n";
//Number of trees in each bin
for(auto i: bin_sizes){
fout<<i<<"\n";
}
//Number of nodes in each bin
for(auto i: bin_node_sizes){
fout<<i<<"\n";
}
//start position of each bin
for(auto bin: bin_start){
for(auto tree_start: bin){
fout<<tree_start<<"\n";
}
}
fout.close();
if(format == std::string("notfound") ||
format == std::string("binary")){
std::string modelfname = Config::getValue("packfilename");
std::string filename;
if(modelfname != std::string("notfound"))
filename = modelfname;
else
filename = "packedmodel.bin";
//Write the nodes
fout.open(filename, std::ios::binary | std::ios::out );
Node<T, F> node_to_write;
for(auto bin: bins){
for(auto node: bin){
node_to_write = node;
fout.write((char*)&node_to_write, sizeof(node_to_write));
}
}
fout.close();
}
else{
//Write the nodes
std::string modelfname = Config::getValue("packfilename");
std::string filename;
if(modelfname != std::string("notfound"))
filename = modelfname;
else
filename = "packedmodel.txt";
std::cout<<"filename: "<<filename <<"\n";
fout.open(filename, std::ios::out );
for(auto bin: bins){
for(auto node: bin){
fout<<node.getLeft()<<", "<<node.getRight()
<<", "<<node.getFeature()<<", "<<node.getThreshold()<<"\n";
}
}
fout.close();
}
}
inline void deserialize(){
//Write the metadata needed to reconstruct bins and for prediction
//TODO: change filename
int num_classes, num_bins;
std::string filename = Config::getValue("metadatafilename");
//std::string filename = "metadata.txt";
std::fstream f;
f.open(filename, std::ios::in );
//Number of classes
f>>num_classes;
Config::setConfigItem("numclasses", std::to_string(num_classes));
//Number of bins
f>>num_bins;
Config::setConfigItem("numthreads", std::to_string(num_bins));
std::vector<int> num_trees_bin;
std::vector<int> num_nodes_bin;
std::vector<std::vector<int>> bin_tree_start;
int val;
//Number of trees in each bin
for(int i=0; i<num_bins; ++i){
f>>val;
num_trees_bin.push_back(val);
}
//Number of nodes in each bin
for(int i=0; i<num_bins; ++i){
f>>val;
num_nodes_bin.push_back(val);
}
std::vector<int> temp;
//start position of each bin
for(int i=0; i<num_bins; ++i){
for(int j=0; j<num_trees_bin[i]; ++j){
f>>val;
temp.push_back(val);
}
bin_tree_start.push_back(temp);
temp.clear();
}
f.close();
setMembers(num_trees_bin, num_nodes_bin, bin_tree_start);
}
};
#endif
|
GB_binop__gt_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_fp32)
// A*D function (colscale): GB (_AxD__gt_fp32)
// D*A function (rowscale): GB (_DxB__gt_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_fp32)
// C=scalar+B GB (_bind1st__gt_fp32)
// C=scalar+B' GB (_bind1st_tran__gt_fp32)
// C=A+scalar GB (_bind2nd__gt_fp32)
// C=A'+scalar GB (_bind2nd_tran__gt_fp32)
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij > bij)
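// For example, user code such as GrB_eWiseMult (C, NULL, NULL, GrB_GT_FP32,
// A, B, NULL) can dispatch to the GB (_AemultB_*__gt_fp32) workers below when
// A and B are FP32 and C is BOOL (a sketch; the exact dispatch path depends
// on GB internals).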
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_FP32 || GxB_NO_GT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__gt_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__gt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__gt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__gt_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__gt_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__gt_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
binary_operation.h | /* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef __NUMPY_BINARY_OPERATION_H__
#define __NUMPY_BINARY_OPERATION_H__
#include "point_task.h"
namespace legate {
namespace numpy {
#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
template <int DIM, typename BinaryFunction, typename Args>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
gpu_binary_op(const Args args, const bool dense)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= args.volume) return;
BinaryFunction func;
if (dense) {
args.outptr[idx] = func(args.in1ptr[idx], args.in2ptr[idx]);
} else {
const Legion::Point<DIM> point = args.pitches.unflatten(idx, args.rect.lo);
args.out[point] = func(args.in1[point], args.in2[point]);
}
}
#endif
// Base class for all Legate's binary operation tasks
template <class Derived, class BinaryFunction>
class BinaryOperationTask : public PointTask<Derived> {
private:
using first_argument_type = typename BinaryFunction::first_argument_type;
using second_argument_type = typename BinaryFunction::second_argument_type;
using result_type = std::result_of_t<BinaryFunction(first_argument_type, second_argument_type)>;
public:
static_assert(std::is_same<first_argument_type, second_argument_type>::value,
"BinaryOperationTask currently requires first_argument_type and "
"second_argument_type to be the same type.");
// XXX figure out how to hoist this into PointTask
static const int TASK_ID = task_id<BinaryFunction::op_code,
NUMPY_NORMAL_VARIANT_OFFSET,
result_type,
first_argument_type,
second_argument_type>;
// out_region = in_region1 op in_region2
static const int REGIONS = 3;
template <int N>
struct DeserializedArgs {
Legion::Rect<N> rect;
AccessorWO<result_type, N> out;
AccessorRO<first_argument_type, N> in1;
AccessorRO<second_argument_type, N> in2;
Pitches<N - 1> pitches;
size_t volume;
result_type* outptr;
const first_argument_type* in1ptr;
const second_argument_type* in2ptr;
bool deserialize(LegateDeserializer& derez,
const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions)
{
rect = NumPyProjectionFunctor::unpack_shape<N>(task, derez);
out = derez.unpack_accessor_WO<result_type, N>(regions[0], rect);
in1 = derez.unpack_accessor_RO<first_argument_type, N>(regions[1], rect);
in2 = derez.unpack_accessor_RO<second_argument_type, N>(regions[2], rect);
volume = pitches.flatten(rect);
#ifndef LEGION_BOUNDS_CHECKS
// Check to see if this is dense or not
return out.accessor.is_dense_row_major(rect) && in1.accessor.is_dense_row_major(rect) &&
in2.accessor.is_dense_row_major(rect) && (outptr = out.ptr(rect)) &&
(in1ptr = in1.ptr(rect)) && (in2ptr = in2.ptr(rect));
#else
// No dense execution if we're doing bounds checks
return false;
#endif
}
};
template <int DIM>
static void dispatch_cpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
BinaryFunction func;
if (dense) {
for (size_t idx = 0; idx < args.volume; ++idx)
args.outptr[idx] = func(args.in1ptr[idx], args.in2ptr[idx]);
} else {
CPULoop<DIM>::binary_loop(func, args.out, args.in1, args.in2, args.rect);
}
}
#ifdef LEGATE_USE_OPENMP
template <int DIM>
static void dispatch_omp(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
BinaryFunction func;
if (dense) {
#pragma omp parallel for schedule(static)
for (size_t idx = 0; idx < args.volume; ++idx) {
args.outptr[idx] = func(args.in1ptr[idx], args.in2ptr[idx]);
}
} else {
OMPLoop<DIM>::binary_loop(func, args.out, args.in1, args.in2, args.rect);
}
}
#endif
#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
template <int DIM>
static void dispatch_gpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez)
{
DeserializedArgs<DIM> args;
const bool dense = args.deserialize(derez, task, regions);
if (args.volume == 0) return;
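// Ceiling division: launch enough blocks that blocks * THREADS_PER_BLOCK
// covers all args.volume elements; the kernel bounds-checks the excess.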
const size_t blocks = (args.volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
gpu_binary_op<DIM, BinaryFunction, DeserializedArgs<DIM>>
<<<blocks, THREADS_PER_BLOCK>>>(args, dense);
}
#elif defined(LEGATE_USE_CUDA)
template <int DIM>
static void dispatch_gpu(const Legion::Task* task,
const std::vector<Legion::PhysicalRegion>& regions,
LegateDeserializer& derez);
#endif
};
} // namespace numpy
} // namespace legate
#endif // __NUMPY_BINARY_OPERATION_H__
|
hola.c | #include <stdio.h>
#include "omp.h"
/*
Based on the tutorial:
http://openmp.org/mp-documents/omp-hands-on-SC08.pdf
*/
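/*
  A typical build and run (assuming GCC; other compilers use different flags,
  e.g. icc -qopenmp):
      gcc -fopenmp hola.c -o hola && ./hola
  Every thread in the parallel team prints one line.
*/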
int main(){
#pragma omp parallel
{
printf("Hola Mundo\n");
}
}
|
axpy.c | /*
* AXPY Y[N] = Y[N] + a*X[N]
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/timeb.h>
#include <pthread.h>
/* read timer in second */
double read_timer() {
struct timeb tm;
ftime(&tm);
return (double) tm.time + (double) tm.millitm / 1000.0;
}
/* read timer in ms */
double read_timer_ms() {
struct timeb tm;
ftime(&tm);
return (double) tm.time * 1000.0 + (double) tm.millitm;
}
#define REAL float
#define VECTOR_LENGTH 102400
/* initialize a vector with random floating point numbers */
void init(REAL A[], int N) {
int i;
for (i = 0; i < N; i++) {
A[i] = (double) drand48();
}
}
double check(REAL A[], REAL B[], int N) {
int i;
double sum = 0.0;
for (i = 0; i < N; i++) {
sum += A[i] - B[i];
}
return sum;
}
void axpy_base(int N, REAL Y[], REAL X[], REAL a);
void axpy_base_sub(int i_start, int Nt, int N, REAL Y[], REAL X[], REAL a);
void axpy_dist(int N, REAL Y[], REAL X[], REAL a, int num_tasks);
void axpy_omp_parallel(int N, REAL Y[], REAL X[], REAL a, int num_tasks);
int main(int argc, char *argv[]) {
int N = VECTOR_LENGTH;
int num_tasks = 4; /* 4 is default number of tasks */
double elapsed; /* for timing */
double elapsed_dist; /* for timing */
if (argc < 2) {
fprintf(stderr, "Usage: axpy <n> [<#tasks(%d)>] (n should be dividable by #tasks)\n", num_tasks);
exit(1);
}
N = atoi(argv[1]);
if (argc > 2) num_tasks = atoi(argv[2]);
REAL a = 123.456;
REAL Y_base[N];
REAL Y_dist[N];
REAL X[N];
srand48((1 << 12));
init(X, N);
init(Y_base, N);
memcpy(Y_dist, Y_base, N * sizeof(REAL));
/* example run */
elapsed = read_timer();
axpy_base(N, Y_base, X, a);
elapsed = (read_timer() - elapsed);
elapsed_dist = read_timer();
axpy_omp_parallel(N, Y_dist, X, a, num_tasks);
elapsed_dist = (read_timer() - elapsed_dist);
/* you should add the call to each function and time the execution */
printf("======================================================================================================\n");
printf("\tAXPY: Y[N] = Y[N] + a*X[N], N=%d, %d tasks for dist\n", N, num_tasks);
printf("------------------------------------------------------------------------------------------------------\n");
printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
printf("------------------------------------------------------------------------------------------------------\n");
printf("axpy_base:\t\t%4f\t%4f \t\t%g\n", elapsed * 1.0e3, (2.0 * N) / (1.0e6 * elapsed), check(Y_base, Y_base, N));
printf("axpy_dist:\t\t%4f\t%4f \t\t%g\n", elapsed_dist * 1.0e3, (2.0 * N) / (1.0e6 * elapsed_dist), check(Y_base, Y_dist, N));
return 0;
}
void axpy_base(int N, REAL Y[], REAL X[], REAL a) {
int i;
for (i = 0; i < N; ++i)
Y[i] += a * X[i];
}
void axpy_base_sub(int i_start, int Nt, int N, REAL Y[], REAL X[], REAL a) {
int i;
for (i = i_start; i < i_start + Nt; ++i)
Y[i] += a * X[i];
}
void axpy_dist(int N, REAL Y[], REAL X[], REAL a, int num_tasks) {
int tid;
for (tid = 0; tid < num_tasks; tid++) {
int Nt, start;
Nt = N/num_tasks;
start = tid*Nt;
axpy_base_sub(start, Nt, N, Y, X, a);
}
}
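/* Note: Nt = N/num_tasks uses integer division, so when N is not divisible by
   num_tasks the trailing N % num_tasks elements are never processed; hence the
   divisibility requirement in the usage message. */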
/* replace the for loop for task decomposition with "omp parallel" */
void axpy_omp_parallel(int N, REAL Y[], REAL X[], REAL a, int num_tasks) {
int tid;
//for (tid = 0; tid < num_tasks; tid++)
#pragma omp parallel shared (X,Y,a,N,num_tasks) private (tid) num_threads(num_tasks)
{
tid = omp_get_thread_num();
// int num_ths = omp_get_num_threads();
int Nt, start;
Nt = N/num_tasks;
start = tid*Nt;
axpy_base_sub(start, Nt, N, Y, X, a);
}
}
void axpy_omp_parallel_for(int N, REAL Y[], REAL X[], REAL a) {
int i;
int numthreads = omp_get_num_threads(); /* 1 since we are in the sequential region */
#pragma omp parallel shared (X,Y,N,a) private (i)
{
#pragma omp master /* or using "single" */
{
int nthreads = omp_get_num_threads();
printf("nthreads: %d\n", nthreads);
}
#pragma omp for schedule(static)
for (i = 0; i < N; ++i)
Y[i] += a * X[i];
}
}
void axpy_omp_parallel_for_combined(int N, REAL Y[], REAL X[], REAL a) {
int i;
#pragma omp parallel for shared (N,X,Y,a) private(i) schedule(static)
for (i = 0; i < N; ++i)
Y[i] += a * X[i];
}
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
/*
Typedef declaration.
*/
struct _MatrixInfo
{
CacheType
type;
size_t
columns,
rows,
stride;
MagickSizeType
length;
MagickBooleanType
mapped,
synchronize;
char
path[MagickPathExtent];
int
file;
void
*elements;
SemaphoreInfo
*semaphore;
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the MatrixInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
static void MatrixSignalHandler(int status)
{
ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
static inline MagickOffsetType WriteMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
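/*
  write()/pwrite() may transfer fewer bytes than requested, so loop until
  `length` bytes are written; a zero or negative count is retried only for
  EINTR, otherwise the loop exits with a short count.
*/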
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PWRITE)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
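/*
  SetMatrixExtent() grows the backing file to `length` bytes by writing a
  single byte at offset length-1 (a standard sparse-file idiom); when
  synchronization is requested it also asks the OS to actually reserve the
  space via posix_fallocate().
*/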
static MagickBooleanType SetMatrixExtent(
MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
MagickOffsetType
count,
extent,
offset;
if (length != (MagickSizeType) ((MagickOffsetType) length))
return(MagickFalse);
offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
if ((MagickSizeType) offset >= length)
return(MagickTrue);
extent=(MagickOffsetType) length-1;
count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
if (matrix_info->synchronize != MagickFalse)
(void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
(void) signal(SIGBUS,MatrixSignalHandler);
#endif
return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
const size_t rows,const size_t stride,ExceptionInfo *exception)
{
char
*synchronize;
MagickBooleanType
status;
MatrixInfo
*matrix_info;
matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
if (matrix_info == (MatrixInfo *) NULL)
return((MatrixInfo *) NULL);
(void) memset(matrix_info,0,sizeof(*matrix_info));
matrix_info->signature=MagickCoreSignature;
matrix_info->columns=columns;
matrix_info->rows=rows;
matrix_info->stride=stride;
matrix_info->semaphore=AcquireSemaphoreInfo();
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
matrix_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
matrix_info->length=(MagickSizeType) columns*rows*stride;
if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'","matrix cache");
return(DestroyMatrixInfo(matrix_info));
}
matrix_info->type=MemoryCache;
status=AcquireMagickResource(AreaResource,matrix_info->length);
if ((status != MagickFalse) &&
(matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
{
status=AcquireMagickResource(MemoryResource,matrix_info->length);
if (status != MagickFalse)
{
matrix_info->mapped=MagickFalse;
matrix_info->elements=AcquireMagickMemory((size_t)
matrix_info->length);
if (matrix_info->elements == NULL)
{
matrix_info->mapped=MagickTrue;
matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
matrix_info->length);
}
if (matrix_info->elements == (unsigned short *) NULL)
RelinquishMagickResource(MemoryResource,matrix_info->length);
}
}
matrix_info->file=(-1);
if (matrix_info->elements == (unsigned short *) NULL)
{
status=AcquireMagickResource(DiskResource,matrix_info->length);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'","matrix cache");
return(DestroyMatrixInfo(matrix_info));
}
matrix_info->type=DiskCache;
matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
if (matrix_info->file == -1)
return(DestroyMatrixInfo(matrix_info));
status=AcquireMagickResource(MapResource,matrix_info->length);
if (status != MagickFalse)
{
status=SetMatrixExtent(matrix_info,matrix_info->length);
if (status != MagickFalse)
matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
(size_t) matrix_info->length);
if (matrix_info->elements != NULL)
matrix_info->type=MapCache;
else
RelinquishMagickResource(MapResource,matrix_info->length);
}
}
return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This is used to generate the two-dimensional matrix and vectors required
% for the GaussJordanElimination() method below, solving some system of
% simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number of pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
const size_t size)
{
double
**matrix;
register ssize_t
i,
j;
matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
if (matrix == (double **) NULL)
return((double **) NULL);
for (i=0; i < (ssize_t) number_rows; i++)
{
matrix[i]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[i]));
if (matrix[i] == (double *) NULL)
{
for (j=0; j < i; j++)
matrix[j]=(double *) RelinquishMagickMemory(matrix[j]);
matrix=(double **) RelinquishMagickMemory(matrix);
return((double **) NULL);
}
for (j=0; j < (ssize_t) size; j++)
matrix[i][j]=0.0;
}
return(matrix);
}
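/*
  Usage sketch (hypothetical caller): allocate a 3x3 matrix and release it
  row-by-row, mirroring the error path above (some ImageMagick versions also
  provide a RelinquishMagickMatrix() helper for this):

    double **m = AcquireMagickMatrix(3UL, 3UL);
    if (m != (double **) NULL)
      {
        ssize_t i;
        m[0][0] = 1.0;
        for (i = 0; i < 3; i++)
          m[i] = (double *) RelinquishMagickMemory(m[i]);
        m = (double **) RelinquishMagickMemory(m);
      }
*/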
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyMatrixInfo method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
LockSemaphoreInfo(matrix_info->semaphore);
switch (matrix_info->type)
{
case MemoryCache:
{
if (matrix_info->mapped == MagickFalse)
matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
else
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=(unsigned short *) NULL;
}
RelinquishMagickResource(MemoryResource,matrix_info->length);
break;
}
case MapCache:
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=NULL;
RelinquishMagickResource(MapResource,matrix_info->length);
}
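/* no break: a map cache also owns a backing file on disk, so fall
   through to the DiskCache cleanup below */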
case DiskCache:
{
if (matrix_info->file != -1)
(void) close(matrix_info->file);
(void) RelinquishUniqueFileResource(matrix_info->path);
RelinquishMagickResource(DiskResource,matrix_info->length);
break;
}
default:
break;
}
UnlockSemaphoreInfo(matrix_info->semaphore);
RelinquishSemaphoreInfo(&matrix_info->semaphore);
return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augmented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix augmenting the matrix for row reduction,
% producing an 'array of column vectors'.
%
% o rank: the size of the matrix (both rows and columns).
% Also represents the number of terms that need to be solved.
%
% o number_vectors: the number of vector columns augmenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that 'matrix' is given as an 'array of row pointers' of rank size.
% That is, values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is, the matrix is in the form of a 'row first array'.
%
% However 'vectors' is an 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially if only one
% column 'vector' is required to produce the desired solution.
%
% For example, 'vectors' can consist of a single pointer to a simple array of
% doubles, when only one set of simultaneous equations is to be solved from
% the given set of coefficient-weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficients[8];
% double *vectors[1] = { coefficients };
% ...
% GaussJordanElimination(matrix, vectors, 8UL, 1UL);
%
% However, by specifying more 'columns' (as an 'array of vector columns'),
% you can use this function to solve a set of 'separable' equations.
%
% For example, a distortion function where u = U(x,y) and v = V(x,y), where
% the functions U() and V() have separate coefficients but are being
% generated from a common x,y -> u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordinates, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix',
% though it is returned as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
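% For example (a sketch), pass an identity matrix as 'vectors' to obtain the
% inverse of a 3x3 'matrix':
%
% double **inverse = AcquireMagickMatrix(3UL,3UL);
% for (i=0; i < 3L; i++)
%   inverse[i][i]=1.0;
% (void) GaussJordanElimination(matrix,inverse,3UL,3UL);
%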
*/
MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix,
double **vectors,const size_t rank,const size_t number_vectors)
{
#define GaussJordanSwap(x,y) \
{ \
if ((x) != (y)) \
{ \
(x)+=(y); \
(y)=(x)-(y); \
(x)=(x)-(y); \
} \
}
double
max,
scale;
register ssize_t
i,
j,
k;
ssize_t
column,
*columns,
*pivots,
row,
*rows;
columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
(pivots == (ssize_t *) NULL))
{
if (pivots != (ssize_t *) NULL)
pivots=(ssize_t *) RelinquishMagickMemory(pivots);
if (columns != (ssize_t *) NULL)
columns=(ssize_t *) RelinquishMagickMemory(columns);
if (rows != (ssize_t *) NULL)
rows=(ssize_t *) RelinquishMagickMemory(rows);
return(MagickFalse);
}
(void) memset(columns,0,rank*sizeof(*columns));
(void) memset(rows,0,rank*sizeof(*rows));
(void) memset(pivots,0,rank*sizeof(*pivots));
column=0;
row=0;
for (i=0; i < (ssize_t) rank; i++)
{
max=0.0;
for (j=0; j < (ssize_t) rank; j++)
if (pivots[j] != 1)
{
for (k=0; k < (ssize_t) rank; k++)
if (pivots[k] != 0)
{
if (pivots[k] > 1)
{
/* release work arrays before the early return to avoid a memory leak */
pivots=(ssize_t *) RelinquishMagickMemory(pivots);
rows=(ssize_t *) RelinquishMagickMemory(rows);
columns=(ssize_t *) RelinquishMagickMemory(columns);
return(MagickFalse);
}
}
else
if (fabs(matrix[j][k]) >= max)
{
max=fabs(matrix[j][k]);
row=j;
column=k;
}
}
pivots[column]++;
if (row != column)
{
for (k=0; k < (ssize_t) rank; k++)
GaussJordanSwap(matrix[row][k],matrix[column][k]);
for (k=0; k < (ssize_t) number_vectors; k++)
GaussJordanSwap(vectors[k][row],vectors[k][column]);
}
rows[i]=row;
columns[i]=column;
if (matrix[column][column] == 0.0)
{
/* release work arrays before the early return to avoid a memory leak */
pivots=(ssize_t *) RelinquishMagickMemory(pivots);
rows=(ssize_t *) RelinquishMagickMemory(rows);
columns=(ssize_t *) RelinquishMagickMemory(columns);
return(MagickFalse); /* singularity */
}
scale=PerceptibleReciprocal(matrix[column][column]);
matrix[column][column]=1.0;
for (j=0; j < (ssize_t) rank; j++)
matrix[column][j]*=scale;
for (j=0; j < (ssize_t) number_vectors; j++)
vectors[j][column]*=scale;
for (j=0; j < (ssize_t) rank; j++)
if (j != column)
{
scale=matrix[j][column];
matrix[j][column]=0.0;
for (k=0; k < (ssize_t) rank; k++)
matrix[j][k]-=scale*matrix[column][k];
for (k=0; k < (ssize_t) number_vectors; k++)
vectors[k][j]-=scale*vectors[k][column];
}
}
for (j=(ssize_t) rank-1; j >= 0; j--)
if (columns[j] != rows[j])
for (i=0; i < (ssize_t) rank; i++)
GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
pivots=(ssize_t *) RelinquishMagickMemory(pivots);
rows=(ssize_t *) RelinquishMagickMemory(rows);
columns=(ssize_t *) RelinquishMagickMemory(columns);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specified element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
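% For example, to visit every element of a matrix of doubles (a sketch):
%
% double value;
% for (y=0; y < (ssize_t) GetMatrixRows(matrix_info); y++)
%   for (x=0; x < (ssize_t) GetMatrixColumns(matrix_info); x++)
%     if (GetMatrixElement(matrix_info,x,y,&value) != MagickFalse)
%       { ... use value ... }
%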
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
if (x < 0L)
return(0L);
if (x >= (ssize_t) columns)
return((ssize_t) (columns-1));
return(x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
if (y < 0L)
return(0L);
if (y >= (ssize_t) rows)
return((ssize_t) (rows-1));
return(y);
}
static inline MagickOffsetType ReadMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PREAD)
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PREAD)
count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PREAD)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
EdgeX(x,matrix_info->columns);
if (matrix_info->type != DiskCache)
{
(void) memcpy(value,(unsigned char *) matrix_info->elements+i*
matrix_info->stride,matrix_info->stride);
return(MagickTrue);
}
count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associated results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the LeastSquaresAddTerms method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficient
% weights) that form the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficients.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: the number of result vectors, and the number of results
% being added. Also represents the number of separable systems of equations
% being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n);
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors,
const double *terms,const double *results,const size_t rank,
const size_t number_vectors)
{
register ssize_t
i,
j;
for (j=0; j < (ssize_t) rank; j++)
{
for (i=0; i < (ssize_t) rank; i++)
matrix[i][j]+=terms[i]*terms[j];
for (i=0; i < (ssize_t) number_vectors; i++)
vectors[i][j]+=results[i]*terms[j];
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double, otherwise nonsense is returned.
%
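% Each element is linearly rescaled to the quantum range before being stored
% as a grayscale pixel:
%
%   pixel = ClampToQuantum(QuantumRange*(value-min_value)/(max_value-min_value))
%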
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
double
max_value,
min_value,
scale_factor,
value;
Image
*image;
MagickBooleanType
status;
ssize_t
y;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (matrix_info->stride < sizeof(double))
return((Image *) NULL);
/*
Determine range of matrix.
*/
(void) GetMatrixElement(matrix_info,0,0,&value);
min_value=value;
max_value=value;
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) matrix_info->columns; x++)
{
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
if (value < min_value)
min_value=value;
else
if (value > max_value)
max_value=value;
}
}
if ((min_value == 0.0) && (max_value == 0.0))
scale_factor=0;
else
if (min_value == max_value)
{
scale_factor=(double) QuantumRange/min_value;
min_value=0;
}
else
scale_factor=(double) QuantumRange/(max_value-min_value);
/*
Convert matrix to image.
*/
image=AcquireImage((ImageInfo *) NULL,exception);
image->columns=matrix_info->columns;
image->rows=matrix_info->rows;
image->colorspace=GRAYColorspace;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
value;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
value=scale_factor*(value-min_value);
*q=ClampToQuantum(value);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the NullMatrix method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
register ssize_t
x;
ssize_t
count,
y;
unsigned char
value;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
if (matrix_info->type != DiskCache)
{
(void) memset(matrix_info->elements,0,(size_t)
matrix_info->length);
return(MagickTrue);
}
value=0;
(void) lseek(matrix_info->file,0,SEEK_SET);
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
for (x=0; x < (ssize_t) matrix_info->length; x++)
{
count=write(matrix_info->file,&value,sizeof(value));
if (count != (ssize_t) sizeof(value))
break;
}
if (x < (ssize_t) matrix_info->length)
break;
}
return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
const size_t number_rows)
{
register ssize_t
i;
if (matrix == (double **) NULL )
return(matrix);
for (i=0; i < (ssize_t) number_rows; i++)
matrix[i]=(double *) RelinquishMagickMemory(matrix[i]);
matrix=(double **) RelinquishMagickMemory(matrix);
return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,const void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,const void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
i=(MagickOffsetType) y*matrix_info->columns+x;
if ((i < 0) ||
((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
return(MagickFalse);
if (matrix_info->type != DiskCache)
{
(void) memcpy((unsigned char *) matrix_info->elements+i*
matrix_info->stride,value,matrix_info->stride);
return(MagickTrue);
}
count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
|
GB_unop__identity_uint64_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_bool)
// op(A') function: GB (_unop_tran__identity_uint64_bool)
// C type: uint64_t
// A type: bool
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint64_bool)
(
uint64_t *Cx, // Cx and Ax may be aliased
const bool *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
uint64_t z = (uint64_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
bool aij = Ax [p] ;
uint64_t z = (uint64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint64_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ccode_omp.h | #include <stdexcept>
#include <sstream>
#include <string>
#include "defines.h"
void cmap_3clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps) {
std::cout << "3-clique using cmap\n";
uint64_t counter = 0;
// for each vertex in parallel
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)// reduction(+:edges)
for (vidType v0 = 0; v0 < g.V(); v0++) {
uint64_t local_counter = 0;
auto y0 = g.N(v0);
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
// set cmap bits
for (auto u : y0) {
#if USE_DAG == 0
if (u >= v0) break;
#endif
cmap.set(u, 1);
}
// 2nd vertex
for (auto v1 : y0) {
#if USE_DAG == 0
if (v1 >= v0) break;
#endif
auto y1 = g.N(v1);
// 3rd vertex
for (auto u : y1) {
#if USE_DAG == 0
if (u >= v1) break;
#endif
local_counter += (cmap.get(u) == 1);
}
}
// clear cmap bits
for (auto u : y0) {
#if USE_DAG == 0
if (u >= v0) break;
#endif
cmap.set(u, 0);
}
counter += local_counter;
}
total = counter;
}
void cmap_4clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps) {
std::cout << "4-clique using cmap\n";
uint64_t counter = 0;
#ifdef STATS_EDGES_VISITED
uint64_t edges = 0;
#endif
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)// reduction(+:edges)
for (vidType v0 = 0; v0 < g.V(); v0++) {
uint64_t local_counter = 0;
// uint64_t local_edges = 0;
auto y0 = g.N(v0);
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
for (auto u : y0) {
#if USE_DAG == 0
if (u >= v0) break;
#endif
// ++local_edges;
cmap.set(u, 1);
}
for (auto v1 : y0) {
#if USE_DAG == 0
if (v1 >= v0) break;
#endif
auto y1 = g.N(v1);
VertexSet y0y1;
y0y1.clear();
for (auto u : y1) {
#if USE_DAG == 0
if (u >= v1) break;
#endif
// ++local_edges;
if (cmap.get(u) == 1) {
cmap.set(u, 2);
y0y1.add(u);
}
}
for (auto v2 : y0y1) {
for (auto v3 : g.N(v2)) {
#if USE_DAG == 0
if (v3 >= v2) break;
#endif
// ++local_edges;
local_counter += (cmap.get(v3) == 2);
}
}
for (auto u : y0y1) cmap.set(u, 1);
}
for (auto u : y0) {
#if USE_DAG == 0
if (u >= v0) break;
#endif
cmap.set(u, 0);
}
// edges += local_edges;
counter += local_counter;
}
total = counter;
}
void cmap_5clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps) {
std::cout << "5-clique using cmap\n";
uint64_t counter = 0;
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0++) {
auto y0 = g.N(v0);
uint64_t local_counter = 0;
#if 0
for (auto v1 : y0) {
auto y1 = g.N(v1);
auto y0y1 = y0 & y1;
for (auto v2 : y0y1) {
auto y2 = g.N(v2);
auto y0y1y2 = y0y1 & y2;
for (auto v3 : y0y1y2)
local_counter += intersection_num(y0y1y2, g.N(v3));
}
}
#else
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
for (auto u : y0) cmap.set(u, 1);
for (auto v1 : y0) {
auto y1 = g.N(v1);
VertexSet y0y1;
y0y1.clear();
for (auto u : y1) {
if (cmap.get(u) == 1) {
cmap.set(u, 2);
y0y1.add(u);
}
}
for (auto v2 : y0y1) {
VertexSet y0y1y2;
y0y1y2.clear();
for (auto u : g.N(v2)) {
if (cmap.get(u) == 2) {
cmap.set(u, 3);
y0y1y2.add(u);
}
}
for (auto v3 : y0y1y2) {
for (auto v4 : g.N(v3)) {
local_counter += (cmap.get(v4) == 3);
}
}
for (auto u : y0y1y2) cmap.set(u, 2);
}
for (auto u : y0y1) cmap.set(u, 1);
}
for (auto u : y0) cmap.set(u, 0);
#endif
counter += local_counter;
}
total = counter;
}
// ad-hoc 4-clique
void cmap_4clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps,
std::vector<EmbList> &emb_lists) {
std::cout << "4-clique using cmap and embedding list\n";
uint64_t counter = 0;
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0 ++) {
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
auto &emb_list = emb_lists[tid];
auto y0 = g.N(v0);
for (auto u : y0) cmap.set(u, 1);
for (auto v1 : y0) {
emb_list.set_size(2, 0);
for (auto u : g.N(v1)) {
if (cmap.get(u) == 1) {
cmap.set(u, 2);
emb_list.add_emb(2, u);
}
}
for (vidType emb_id = 0; emb_id < emb_list.size(2); emb_id++) {
auto v2 = emb_list.get_vertex(2, emb_id);
for (auto v3 : g.N(v2)) {
// if (cmap.get(v3) == 2)
// counter ++;
counter += (cmap.get(v3) == 2);
}
}
for (vidType emb_id = 0; emb_id < emb_list.size(2); emb_id++) {
auto v = emb_list.get_vertex(2, emb_id);
cmap.set(v, 1);
}
}
for (auto u : y0) cmap.set(u, 0);
}
total = counter;
}
void cmap_5clique(Graph &g, uint64_t &total,
std::vector<cmap8_t> &cmaps,
std::vector<EmbList> &emb_lists) {
std::cout << "5-clique using cmap and embedding list\n";
uint64_t counter = 0;
#pragma omp parallel for schedule(dynamic, 1) reduction(+:counter)
for (vidType v0 = 0; v0 < g.V(); v0 ++) {
uint64_t local_counter = 0;
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
auto &emb_list = emb_lists[tid];
auto y0 = g.N(v0);
for (auto u : y0) cmap.set(u, 1);
for (auto v1 : y0) {
emb_list.set_size(2, 0);
for (auto u : g.N(v1)) {
if (cmap.get(u) == 1) {
cmap.set(u, 2);
emb_list.add_emb(2, u);
}
}
for (vidType id2 = 0; id2 < emb_list.size(2); id2++) {
auto v2 = emb_list.get_vertex(2, id2);
emb_list.set_size(3, 0);
for (auto u : g.N(v2)) {
if (cmap.get(u) == 2) {
cmap.set(u, 3);
emb_list.add_emb(3, u);
}
}
for (vidType id3 = 0; id3 < emb_list.size(3); id3++) {
auto v3 = emb_list.get_vertex(3, id3);
for (auto v4 : g.N(v3)) {
// if (cmap.get(v4) == 3)
// local_counter ++;
local_counter += (cmap.get(v4) == 3);
}
}
for (vidType id3 = 0; id3 < emb_list.size(3); id3++) {
auto v = emb_list.get_vertex(3, id3);
cmap.set(v, 2);
}
}
for (vidType id2 = 0; id2 < emb_list.size(2); id2++) {
auto v = emb_list.get_vertex(2, id2);
cmap.set(v, 1);
}
}
for (auto u : y0) cmap.set(u, 0);
counter += local_counter;
}
total = counter;
}
void cmap_kclique(Graph &g, unsigned k, uint64_t &total,
std::vector<cmap8_t> &cmaps) {
switch (k) {
case 3:
cmap_3clique(g, total, cmaps);
break;
case 4:
cmap_4clique(g, total, cmaps);
break;
case 5:
#if USE_DAG == 0
// a static_assert here would fail every build with USE_DAG == 0, even when
// k != 5, so report the limitation at run time instead
throw std::runtime_error{"5-clique w/o DAG not implemented yet!"};
#else
cmap_5clique(g, total, cmaps);
break;
#endif
default:
std::stringstream sstream;
sstream << k << "-clique not implemented with cmap yet!";
throw std::runtime_error{sstream.str()};
}
}
|
jac_solv_parfor.c | /*
** PROGRAM: Jacobi solver ... parallel-for version
**
** PURPOSE: This program will explore use of a jacobi iterative
** method to solve a system of linear equations (Ax= b).
**
** Here is the basic idea behind the method. Rewrite
** the matrix A as the sum of a lower triangular (L), an upper
** triangular (U), and a diagonal (D) matrix:
**
** Ax = (L + D + U)x = b
**
** Carry out the multiplication and rearrange:
**
** Dx = b - (L+U)x --> x = (b-(L+U)x)/D
**
** We can do this iteratively
**
** x_new = (b-(L+U)x_old)/D
**
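** Component-wise this update reads
**
** x_new[i] = (b[i] - sum_{j != i} A[i][j]*x_old[j]) / A[i][i]
**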
** USAGE: Run without arguments to use the default SIZE.
**
** ./jac_solv
**
** Run with a single argument for the order of the A
** matrix ... for example
**
** ./jac_solv 2500
**
** HISTORY: Written by Tim Mattson, Oct 2015
*/
#include<omp.h>
#include<math.h>
#include "mm_utils.h" //a library of basic matrix utilities functions
//and some key constants used in this program
//(such as TYPE)
#define TOLERANCE 0.001
#define DEF_SIZE 1000
#define MAX_ITERS 5000
#define LARGE 1000000.0
//#define DEBUG 1 // output a small subset of intermediate values
//#define VERBOSE 1
int main(int argc, char **argv)
{
int Ndim; // A[Ndim][Ndim]
int i,j, iters;
double start_time, elapsed_time;
TYPE conv, tmp, err, chksum;
TYPE *A, *b, *x1, *x2, *xnew, *xold, *xtmp;
// set matrix dimensions and allocate memory for matrices
if(argc ==2){
Ndim = atoi(argv[1]);
}
else{
Ndim = DEF_SIZE;
}
printf(" jacobi solver parallel for version: ndim = %d\n",Ndim);
A = (TYPE *) malloc(Ndim*Ndim*sizeof(TYPE));
b = (TYPE *) malloc(Ndim*sizeof(TYPE));
x1 = (TYPE *) malloc(Ndim*sizeof(TYPE));
x2 = (TYPE *) malloc(Ndim*sizeof(TYPE));
if (!A || !b || !x1 || !x2)
{
printf("\n memory allocation error\n");
exit(-1);
}
// generate our diagonally dominant matrix, A
init_diag_dom_near_identity_matrix(Ndim, A);
#ifdef VERBOSE
mm_print(Ndim, Ndim, A);
#endif
//
// Initialize x and just give b some non-zero random values
//
for(i=0; i<Ndim; i++){
x1[i] = (TYPE)0.0;
x2[i] = (TYPE)0.0;
b[i] = (TYPE)(rand()%51)/100.0;
}
start_time = omp_get_wtime();
//
// jacobi iterative solver
//
conv = LARGE;
iters = 0;
xnew = x1;
xold = x2;
{
// note: I am comparing against the convergence squared. This saves a
// sqrt and an extra barrier.
while((conv > TOLERANCE*TOLERANCE) && (iters<MAX_ITERS))
{
{
iters++;
conv = 0.0;
xtmp = xnew; // don't copy arrays.
xnew = xold; // just swap pointers.
xold = xtmp;
}
#pragma omp parallel for private(i,j)
for (i=0; i<Ndim; i++){
xnew[i] = (TYPE) 0.0;
for (j=0; j<Ndim;j++){
// if(i!=j)
// xnew[i]+= A[i*Ndim + j]*xold[j];
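// the (i != j) factor zeroes the diagonal term without a branch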
xnew[i]+= A[i*Ndim + j]*xold[j] * (i != j);
}
xnew[i] = (b[i]-xnew[i])/A[i*Ndim+i];
}
//
// test convergence
//
#pragma omp parallel for private(tmp) reduction(+:conv)
for (i=0; i<Ndim; i++){
tmp = xnew[i]-xold[i];
conv += tmp*tmp;
}
#ifdef DEBUG
printf(" conv = %f \n",(float)conv);
#endif
}
}
conv = sqrt((double)conv);
elapsed_time = omp_get_wtime() - start_time;
printf(" Convergence = %g with %d iterations and %f seconds\n",
(float)conv, iters, (float)elapsed_time);
//
// test answer by multiplying my computed value of x by
// the input A matrix and comparing the result with the
// input b vector.
//
err = (TYPE) 0.0;
chksum = (TYPE) 0.0;
for(i=0;i<Ndim;i++){
xold[i] = (TYPE) 0.0;
for(j=0; j<Ndim; j++)
xold[i] += A[i*Ndim+j]*xnew[j];
tmp = xold[i] - b[i];
#ifdef DEBUG
printf(" i=%d, diff = %f, computed b = %f, input b= %f \n",
i, (float)tmp, (float)xold[i], (float)b[i]);
#endif
chksum += xnew[i];
err += tmp*tmp;
}
err = sqrt((double)err);
printf("jacobi solver: err = %f, solution checksum = %f \n",
(float)err, (float)chksum); // err was already square-rooted above
free(A);
free(b);
free(x1);
free(x2);
}
|
GB_unop__abs_fp32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_fp32_fc32)
// op(A') function: GB (_unop_tran__abs_fp32_fc32)
// C type: float
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = cabsf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cabsf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = (aij) ; \
Cx [pC] = cabsf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__abs_fp32_fc32)
(
float *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = cabsf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = (aij) ;
Cx [p] = cabsf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__abs_fp32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
multiprocomp.h | /* This file is part of the Tomographer project, which is distributed under the
* terms of the MIT license.
*
* The MIT License (MIT)
*
* Copyright (c) 2016 ETH Zurich, Institute for Theoretical Physics, Philippe Faist
* Copyright (c) 2017 Caltech, Institute for Quantum Information and Matter, Philippe Faist
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MULTIPROCOMP_H
#define MULTIPROCOMP_H
#include <csignal>
#include <chrono>
#include <stdexcept>
#ifdef _OPENMP
#include <omp.h>
#else
inline constexpr int omp_get_thread_num() { return 0; }
inline constexpr int omp_get_num_threads() { return 1; }
#endif
#include <boost/exception/diagnostic_information.hpp>
#include <tomographer/tools/loggers.h>
#include <tomographer/tools/cxxutil.h> // tomographer_assert()
#include <tomographer/tools/needownoperatornew.h>
#include <tomographer/multiproc.h>
#include <tomographer/multiprocthreadcommon.h>
/** \file multiprocomp.h
*
* \brief Multiprocessing with OpenMP parallel multithreading.
*
* See \ref Tomographer::MultiProc::OMP.
*
*/
namespace Tomographer {
namespace MultiProc {
namespace OMP {
/** \brief Wrapper logger to call non-thread-safe loggers from a multithreaded environment.
*
* Wraps calls to emit log messages into OpenMP
* \code
* #pragma omp critical
* \endcode
* sections, which ensure thread-safety of the logging. Of course don't log too often,
* as this will drastically slow down the execution of your program!!
*
* \note If the base logger is already thread-safe (as defined by \ref
* Logger::DefaultLoggerTraits::IsThreadSafe "LoggerTraits::IsThreadSafe"), then the
* call to emit the log is not wrapped in a critical section, but directly called.
*
* \todo Buffer log entries here to optimize performance and to limit the number of
* <code>\#pragma omp critical</code> blocks.
* <b>---NO DON'T. It would make it complex to debug afterwards; if there is a crash,
* some messages may not be displayed making debugging difficult.</b>
*
* \warning The runtime level of this logger is fixed to the level of the base logger at
* the moment of instantiation. Any changes to the level of the base logger afterwards
* will not be reflected here. This is for thread-safety/consistency reasons.
*
* \warning If your base logger has a \a filterByOrigin() mechanism and is not
* thread-safe, this might be very slow because an OMP critical section is opened on each
* log message which needs to be tested for its origin.
*
* Example usage:
* \code
* SomeLogger logger;
*
* #pragma omp parallel ...
* {
* ... // parallel code
*
* // it may not be safe to log to `logger`, because it might not be
* // thread-safe. So create a ThreadSanitizerLogger to which we can
* // safely log and pass to sub-routines that want a logger.
* ThreadSanitizerLogger<SomeLogger> threadsafelogger(logger);
*
* threadsafelogger.longdebug( ... ); // safe
*
* // the logger may be passed to subtasks
* FidelityHistogramStatsCollector<MatrQ, double, ThreadSanitizerLogger<SomeLogger> >
* fidelityhistogramcollector(..., threadsafelogger);
*
* ... // more parallel code
*
* }
* \endcode
*
*/
template<typename BaseLogger>
class TOMOGRAPHER_EXPORT ThreadSanitizerLogger
: public Logger::LoggerBase<ThreadSanitizerLogger<BaseLogger> >
{
public:
static constexpr bool IsBaseLoggerThreadSafe = Logger::LoggerTraits<BaseLogger>::IsThreadSafe;
private:
BaseLogger & _baselogger;
public:
/** \brief Constructor
*
* This constructor accepts arbitrarily many more arguments and ignores them. The reason
* is that the task dispatcher does not know for sure which type the task-logger is (you
* can specify your custom type), and will always invoke the constructor with additional
* parameters such as a pointer to the \a TaskCData. Here we don't need those so we can
* just ignore any additional args.
*/
template<typename... MoreArgs>
ThreadSanitizerLogger(BaseLogger & logger, MoreArgs && ...)
// NOTE: pass the baselogger's level on here. The ThreadSanitizerLogger's level is
// this one, and is fixed and cannot be changed while running.
: Logger::LoggerBase<ThreadSanitizerLogger<BaseLogger> >(logger.level()),
_baselogger(logger)
{
}
~ThreadSanitizerLogger()
{
}
//! Implementation of Logger::LoggerBase::emitLog() for a base logger which is thread-safe
TOMOGRAPHER_ENABLED_IF(IsBaseLoggerThreadSafe)
inline void emitLog(int level, const char * origin, const std::string& msg)
{
_baselogger.emitLog(level, origin, msg);
}
//! Implementation of Logger::LoggerBase::filterByOrigin() for a base logger which is thread-safe
TOMOGRAPHER_ENABLED_IF(Logger::LoggerTraits<BaseLogger>::HasFilterByOrigin &&
IsBaseLoggerThreadSafe)
bool filterByOrigin(int level, const char * origin) const
{
return _baselogger.filterByOrigin(level, origin);
}
//! Implementation of Logger::LoggerBase::emitLog() for a base logger which is not thread-safe
TOMOGRAPHER_ENABLED_IF(!IsBaseLoggerThreadSafe)
inline void emitLog(int level, const char * origin, const std::string& msg)
{
#pragma omp critical
{
_baselogger.emitLog(level, origin, msg);
}
}
//! Implementation of Logger::LoggerBase::filterByOrigin() for a base logger which is not thread-safe
TOMOGRAPHER_ENABLED_IF(Logger::LoggerTraits<BaseLogger>::HasFilterByOrigin &&
!IsBaseLoggerThreadSafe)
bool filterByOrigin(int level, const char * origin) const
{
bool ok;
#pragma omp critical
{
ok = _baselogger.filterByOrigin(level, origin);
}
return ok;
}
};
} // namespace OMP
} // namespace MultiProc
namespace Logger {
/** \brief Specialized Traits for \ref
* Tomographer::MultiProc::OMP::ThreadSanitizerLogger<typename BaseLogger> --
* see \ref Tomographer::Logger::LoggerTraits<typename LoggerType>
*
* Logger traits for \ref MultiProc::OMP::ThreadSanitizerLogger.
*/
template<typename BaseLogger>
struct TOMOGRAPHER_EXPORT LoggerTraits<MultiProc::OMP::ThreadSanitizerLogger<BaseLogger> >
: public LoggerTraits<BaseLogger>
{
/** \brief Special flags for this logger */
enum {
/** \brief explicitly require our logger instance to store its level. The level cannot be
* changed */
HasOwnGetLevel = 0,
/** \brief Obviously this logger is now always thread-safe */
IsThreadSafe = 1
};
};
} // namespace Logger
namespace MultiProc {
namespace OMP {
/** \brief Dispatches tasks to parallel threads using OpenMP
*
* Uses <a href="http://openmp.org/">OpenMP</a> to parallelize the repetition of a same
* task with different inputs.
*
* Check out <a href="https://computing.llnl.gov/tutorials/openMP/">this good tutorial on
* OpenMP</a>.
*
* \since Changed in %Tomographer 5.0: removed results collector, introduced
* collectedTaskResults() and friends
*
* <ul>
*
* <li> \a TaskType must be a \ref pageInterfaceTask compliant type. This type specifies
* the task which has to be run. Objects of this type will be instantiated within
* separate threads to run the tasks.
*
* <li> \a TaskCData should conform to the \ref pageInterfaceTaskCData.
*
* \a TaskCData may be any struct which contains all the information which
* needs to be accessed by the task. It should be read-only, i.e. the task should
* not need to write to this information. (This typically encodes the data of the
* problem, ie. experimental measurement results.)
*
* <li> \a LoggerType is a logger type derived from \ref Logger::LoggerBase, for example
* \ref Logger::FileLogger. This is the type of a logger defined in the caller's
* scope (and given as constructor argument here) to which messages should be logged
* to.
*
* <li> \a TaskLoggerType is the type of the logger which will be provided to tasks inside
* the parallel section. Such logger should ensure that the logging is
* thread-safe. By default \a TaskLoggerType is nothing else than an appropriate \ref
* ThreadSanitizerLogger.
*
* (Note that if the originally given \c logger is thread-safe (see \ref
* Logger::LoggerTraits), then \ref ThreadSanitizerLogger directly relays calls
* without wrapping them into OMP critical sections.)
*
* For each task, a new \a TaskLoggerType will be created. The constructor is expected
* to accept the following arguments:
* \code
* TaskLoggerType(LoggerType & baselogger, const TaskCData * pcdata, CountIntType k)
* \endcode
* where \a baselogger is the logger given to the \a TaskDispatcher constructor,
* \a pcdata is the constant shared data pointer also given to the constructor, and
* \a k is the task number (which may range from 0 to the total number of tasks -
* 1). The task logger is NOT constructed in a thread-safe code region, so use \c
* "\#pragma omp critical" if necessary. You may use \a omp_get_thread_num() and \a
* omp_get_num_threads() to get the current thread number and the total number of
* threads, respectively.
*
* <li> \a TaskCountIntType should be a type to use to count the number of tasks. Usually
* there's no reason not to use an \c int.
*
* </ul>
*
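* A minimal usage sketch (\a MyTask and \a MyCData are hypothetical types
* conforming to \ref pageInterfaceTask and \ref pageInterfaceTaskCData):
* \code
* MyCData cdata(...);
* Tomographer::Logger::FileLogger logger(stderr);
* auto dispatcher =
*     Tomographer::MultiProc::OMP::makeTaskDispatcher<MyTask>(&cdata, logger, 100);
* dispatcher.run();
* const auto & results = dispatcher.collectedTaskResults();
* \endcode
*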
*/
template<typename TaskType_, typename TaskCData_, typename LoggerType_,
typename TaskCountIntType_ = int,
typename TaskLoggerType_ = ThreadSanitizerLogger<LoggerType_> >
class TOMOGRAPHER_EXPORT TaskDispatcher
: public Tomographer::MultiProc::ThreadCommon::TaskDispatcherBase<
TaskType_,
TaskCountIntType_
>
{
public:
//! Base class, provides common functionality to all thread-based MultiProc implementations
typedef Tomographer::MultiProc::ThreadCommon::TaskDispatcherBase<TaskType_, TaskCountIntType_> Base;
//! The task type
using typename Base::TaskType;
//! The task result type
using typename Base::TaskResultType;
//! The type used by a single task when providing a status report
using typename Base::TaskStatusReportType;
//! Integer type used to count the number of tasks to run (or running)
using typename Base::TaskCountIntType;
//! The type to use to generate a full status report of all running tasks
using typename Base::FullStatusReportType;
//! The type which stores constant, shared data for all tasks to access
typedef TaskCData_ TaskCData;
//! The logger type specified to the dispatcher (not necessarily thread-safe)
typedef LoggerType_ LoggerType;
//! A thread-safe logger type which is passed on to the child tasks
typedef TaskLoggerType_ TaskLoggerType;
/** \brief The relevant type for a callback function (or callable) which is provided
* with the full status report
*
* See \ref setStatusReportHandler().
*/
using typename Base::FullStatusReportCallbackType;
private:
typedef typename Base::template ThreadSharedData<TaskCData, LoggerType>
ThreadSharedDataType;
ThreadSharedDataType shared_data;
TaskCountIntType n_chunk;
struct CriticalSectionManager {
template<typename Fn>
inline void critical_status_report(Fn && fn) {
#pragma omp critical
{
fn();
}
}
template<typename Fn>
inline void critical_status_report_and_user_fn(Fn && fn) {
#pragma omp critical
{
fn();
}
}
template<typename Fn>
inline void critical_status_report_and_schedule(Fn && fn) {
#pragma omp critical
{
fn();
}
}
template<typename Fn>
inline void critical_schedule(Fn && fn) {
#pragma omp critical
{
fn();
}
}
};
CriticalSectionManager critical;
typedef typename Base::template ThreadPrivateData<
ThreadSharedDataType,
Tomographer::Logger::LocalLogger<TaskLoggerType>,
CriticalSectionManager
>
ThreadPrivateDataType;
public:
/** \brief Task dispatcher constructor
*
* \param pcdata_ The constant shared data, which will be accessible by all
* tasks
*
* \param logger_ The logger instance to use to log messages. This logger
* does not need to be thread safe.
*
* \param num_total_runs_ The number of tasks to run in total. Recall that
* the inputs to the different task instances are provided by
* the TaskCData's getTaskInput() method (see \ref
* pageInterfaceTaskCData).
*
* \param n_chunk_ How many tasks to chunk together into one thread. This
* corresponds to OpenMP's chunk argument in the instruction
* <em>schedule(dynamic,chunk)</em> in a <em>\#pragma omp
* for</em> instruction (see <a
* href="https://computing.llnl.gov/tutorials/openMP/\#DO"
* target="_blank">this page</a>).
*/
TaskDispatcher(TaskCData * pcdata_, LoggerType & logger_,
TaskCountIntType num_total_runs_,
TaskCountIntType n_chunk_ = 1)
: shared_data(pcdata_, logger_, num_total_runs_, 0), n_chunk(n_chunk_)
{
}
TaskDispatcher(TaskDispatcher && x)
: shared_data(std::move(x.shared_data)),
n_chunk(x.n_chunk)
{
}
~TaskDispatcher()
{
}
/** \brief Run the specified tasks
*
* Do everything, run tasks, collect results etc.
*/
void run()
{
auto logger = Tomographer::Logger::makeLocalLogger(TOMO_ORIGIN,
shared_data.logger);
logger.debug("Let's go!");
shared_data.time_start = Base::StdClockType::now();
logger.debug("Preparing for parallel runs");
#ifndef _OPENMP
logger.warning("OpenMP is disabled; tasks will run serially.");
#endif
// declaring these as "const" causes a weird compiler error
// "`n_chunk' is predetermined `shared' for `shared'"
TaskCountIntType num_total_runs = shared_data.schedule.num_total_runs;
TaskCountIntType n_chunk_ = n_chunk;
(void)n_chunk_; // silence "unused variable" warning when compiling without OMP support
TaskCountIntType k = 0;
ThreadSharedDataType * shdat = & shared_data;
const std::string logger_prefix = logger.originPrefix()+logger.glue()+"worker";
const std::string * logger_prefix_ptr = &logger_prefix;
logger.debug("About to start parallel section");
#pragma omp parallel default(none) private(k) shared(shdat, logger_prefix_ptr, num_total_runs, n_chunk_)
{
// construct a thread-safe logger we can use
TaskLoggerType threadsafelogger(shared_data.logger, shdat->pcdata, k);
Tomographer::Logger::LocalLogger<TaskLoggerType> locallogger(*logger_prefix_ptr, threadsafelogger);
ThreadPrivateDataType private_data(omp_get_thread_num(), & shared_data, locallogger, critical);
private_data.shared_data = shdat;
private_data.task_id = -1;
// master thread sets shared_data.schedule.num_threads ...
#pragma omp master
{
shdat->schedule.num_threads = omp_get_num_threads();
}
// ... while other threads wait for master to be done
#pragma omp barrier
#pragma omp flush
//
// Register new parallel worker
//
this->run_worker_enter(private_data, *shdat);
//
// The main, parallel FOR loop:
//
#pragma omp for schedule(dynamic,n_chunk) nowait
for (k = 0; k < num_total_runs; ++k) {
private_data.task_id = k;
this->run_task(private_data, shared_data);
} // omp for
//
// De-register parallel worker
//
this->run_worker_exit(private_data, *shdat);
#pragma omp master
{
this->master_continue_monitoring_status(private_data, *shdat) ;
}
} // omp parallel
logger.debug("OpenMP parallel section finished");
this->run_epilog(shared_data, logger) ;
logger.debug("Done.");
}
/** \brief Total number of task run instances
*
*/
inline TaskCountIntType numTaskRuns() const {
return shared_data.schedule.num_total_runs;
}
/** \brief Get all the task results
*
*/
inline const std::vector<TaskResultType*> & collectedTaskResults() const {
return shared_data.results;
}
/** \brief Get the result of a specific given task
*
*/
inline const TaskResultType & collectedTaskResult(std::size_t k) const {
return *shared_data.results[k];
}
/** \brief assign a callable to be called whenever a status report is
* requested
*
* This function remembers the given \a fnstatus callable, so that each time
* that \ref requestStatusReport() is called at any later point, then this
* callback will be invoked.
*
* The callback, when invoked, will be called with a single parameter of type
* \ref FullStatusReport "FullStatusReport<TaskStatusReportType>". It is
* guaranteed to be called from within the main thread, that is, the one with
* <code>omp_get_thread_num() == 0</code>.
*/
inline void setStatusReportHandler(FullStatusReportCallbackType fnstatus)
{
#pragma omp critical
{
shared_data.status_report.user_fn = fnstatus;
}
}
/** \brief Request a status report
*
* This function makes a note that a status report has been requested.
* Subsequently, the tasks should notice it (provided they regularly query for
* status report requests as described on the page \ref pageInterfaceTask),
* and provide status reports. When all the reports have been received from
* all running threads, the full status report is passed on to the callback
* set with \ref setStatusReportHandler().
*
* \note This function is safe to be called from within a signal handler.
*/
inline void requestStatusReport()
{
//
// This function can be called from a signal handler. We essentially can't
// do anything here because the state of the program can be pretty much
// anything, including inside a malloc() or gomp lock. So can't call any
// function which needs malloc or a #pragma omp critical.
//
// So just increment an atomic int.
//
++ shared_data.status_report.event_counter_user;
}
/** \brief Request a periodic status report
*
* The status report function callback set with \ref setStatusReportHandler()
* will be called every \a milliseconds milliseconds with a status report.
*
* Pass \a -1 as argument to milliseconds to disable periodic status reports.
*/
inline void requestPeriodicStatusReport(int milliseconds)
{
#pragma omp critical
{
shared_data.status_report.periodic_interval = milliseconds;
}
}
/** \brief Request an immediate interruption of the tasks.
*
* Execution inside the function \ref run() will stop as soon as each worker
* notices the interrupt request, and will emit the \ref
* TasksInterruptedException.
*
* The periodic check on the tasks' side is implemented in each tasks' check
* for a status report, so that any \ref pageInterfaceTask -compliant type
* which periodically checks for status reports is automatically
* interruptible.
*
* \note This function is safe to be called from within a signal handler.
*/
inline void requestInterrupt()
{
shared_data.schedule.interrupt_requested = 1;
}
}; // class TaskDispatcher
/** \brief Create an OMP task dispatcher. Useful if you want C++'s template argument
* deduction mechanism
*/
template<typename TaskType_, typename TaskCData_,
typename LoggerType_, typename TaskCountIntType_ = int>
inline TaskDispatcher<TaskType_, TaskCData_,
LoggerType_, TaskCountIntType_>
makeTaskDispatcher(TaskCData_ * pcdata_, LoggerType_ & logger_,
TaskCountIntType_ num_total_runs_, TaskCountIntType_ n_chunk_ = 1)
{
// RVO should be rather obvious to the compiler
return TaskDispatcher<TaskType_, TaskCData_,
LoggerType_, TaskCountIntType_>(
pcdata_, logger_, num_total_runs_, n_chunk_
);
}
} // namespace OMP
} // namespace MultiProc
} // namespace Tomographer
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
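/// Editor's sketch of the intended access pattern (\c FID is an illustrative
/// FileID, not part of this class):
/// \code
///   FileNullabilityMap NullMap;
///   NullMap[FID].SawTypeNullability = true;      // miss: entry enters cache
///   bool Seen = NullMap[FID].SawTypeNullability; // hit: no DenseMap lookup
/// \endcode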
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token: all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This allows us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overload resolution, so we postpone its computation until it is actually
/// needed.
///
/// Clients should be very careful when using this function: it stores a
/// function_ref, so clients must make sure all calls to get() with the same
/// location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style casts, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
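/// Editor's sketch of how the token-tied type behaves (\c ArgLoc, \c OtherLoc,
/// and \c computeArgType are illustrative names). Note the lifetime caveat on
/// enterFunctionArgument: the function_ref must outlive the get() call.
/// \code
///   PreferredTypeBuilder PrefTy;
///   PrefTy.enterFunctionArgument(ArgLoc, computeArgType); // stored, not run
///   QualType T = PrefTy.get(ArgLoc);   // runs computeArgType on demand
///   QualType U = PrefTy.get(OtherLoc); // location mismatch: null QualType
/// \endcode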
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See the comment in the overload below for why it's safe to compute the
// linkage of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method() {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
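// Editor's sketch of the push/pop protocol on a PragmaStack, mirroring
// "#pragma pack(push, 4)" ... "#pragma pack(pop)". Locations, labels, and
// the exact Act() behavior (defined out of line) are illustrative here.
//
//   PragmaStack<unsigned> Pack(/*Default=*/8);
//   Pack.Act(Loc, PSK_Push_Set, /*StackSlotLabel=*/"", 4u); // save 8, set 4
//   assert(Pack.hasValue() && Pack.CurrentValue == 4);
//   Pack.Act(Loc, PSK_Pop, /*StackSlotLabel=*/"", 0u);      // restore 8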
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
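// Editor's sketch of the intended use around a C++ method body (the slot
// label is illustrative):
//
//   {
//     PragmaStackSentinelRAII Guard(S, "InternalPragmaState", IsCXXMethod);
//     // ... parse the method body; #pragma changes stay scoped to it ...
//   } // if ShouldAct, the destructor pops the sentinel slots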
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A pushed group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This
/// allows us to associate a raw vector type with one of the ext_vector type
/// names. This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of the translation unit.
///
/// This list contains class members and the locations of delete-expressions
/// for which we could not prove whether they mismatch the new-expression
/// used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
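/// Editor's sketch of the delay/inspect pattern; the pool's constructor
/// argument is an assumption here (see DelayedDiagnostic.h for the real
/// interface):
/// \code
///   sema::DelayedDiagnosticPool Pool(/*Parent=*/nullptr); // assumed ctor
///   auto State = S.DelayedDiagnostics.push(Pool);
///   // ... parse; access/deprecation diagnostics collect in Pool ...
///   S.DelayedDiagnostics.popWithoutEmitting(State);
///   // the caller now decides whether to emit what Pool collected
/// \endcode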
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
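/// Editor's sketch of typical use (\c NewDC is an illustrative DeclContext*):
/// \code
///   {
///     Sema::ContextRAII SavedCtx(S, NewDC); // S.CurContext == NewDC here
///     // ... build declarations inside NewDC ...
///   } // destructor restores CurContext, diagnostic state, CXXThis override
/// \endcode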
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
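/// Editor's sketch of synthesizing a special member body (\c Ctor and
/// \c UseLoc are illustrative):
/// \code
///   Sema::SynthesizedFunctionScope Scope(S, Ctor);
///   Scope.addContextNote(UseLoc); // notes will point at the triggering use
///   // ... build the body and attach it to Ctor ...
/// \endcode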
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; such an identifier may alias
/// another identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system
/// headers to define functions that occur in multiple standards to call the
/// version in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle them separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
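/// Editor's sketch of source forms that select some of these contexts
/// (illustrative, not exhaustive):
/// \code
///   sizeof(f());         // Unevaluated: f is not odr-used
///   case N + 1:          // ConstantEvaluated: folded at compile time
///   int x = f();         // PotentiallyEvaluated: code is generated
///   void g(int a = f()); // PotentiallyEvaluatedIfUsed until the default
///                        // argument is used at a call site
/// \endcode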
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
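/// Editor's sketch: the returned builder streams arguments exactly like
/// DiagnosticBuilder and emits through Sema on destruction, attaching any
/// template instantiation notes (\c diag::err_example is illustrative):
/// \code
///   S.Diag(Loc, diag::err_example) << SomeNamedDecl << SomeQualType;
/// \endcode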
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether their
/// address is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible in the current context.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
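// A typical call site passes a diagnostic ID plus its arguments directly; a
// minimal sketch (the specific diagnostic ID is illustrative):
//
//   if (RequireCompleteType(Loc, T, diag::err_typecheck_decl_incomplete_type))
//     return true; // a diagnostic has already been emitted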
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo() = default;
bool ShouldSkip = false;
bool CheckSameAsPrevious = false;
NamedDecl *Previous = nullptr;
NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
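// How a caller typically consumes the classification; a minimal sketch (the
// surrounding parser code is illustrative, not part of this interface):
//
//   Sema::NameClassification NC =
//       Actions.ClassifyName(S, SS, Name, NameLoc, NextToken);
//   switch (NC.getKind()) {
//   case Sema::NC_Type: {
//     ParsedType T = NC.getType(); // parse a declaration using T
//     break;
//   }
//   case Sema::NC_NonType:
//     // convert to an expression via ActOnNameClassifiedAsNonType
//     break;
//   default:
//     break; // remaining kinds handled similarly
//   }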
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
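// User code of the kind these two routines are concerned with (sketch):
//
//   int g = 0;
//   void h() { g<int>(0); } // 'g' is not a template, but is followed by
//                           // what looks like a template argument list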
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
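// An Objective-C (ARC) sketch of a non-trivial C union reaching these checks
// (illustrative):
//
//   union U { id obj; int x; }; // non-trivial to initialize/copy/destroy
//                               // because of the implicitly __strong member
//   void f(union U u);          // diagnosed with NTCUC_FunctionParam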
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
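// Illustrative C++20 module-interface code driving these actions (sketch):
//
//   export module M; // ActOnModuleDecl (ModuleDeclKind::Interface)
//   export {         // ActOnStartExportDecl
//     int f();
//   }                // ActOnFinishExportDecl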
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, we actually parse the body and reject with an error in case
/// of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher-priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as plain
/// integer values. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
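// Illustrative source exercising these priorities (sketch):
//
//   __attribute__((availability(macos, introduced=10.13)))
//   void f(void); // AP_Explicit
//
//   #pragma clang attribute push(__attribute__((availability(macos, introduced=10.12))), apply_to=function)
//   void g(void); // AP_PragmaClangAttribute
//   #pragma clang attribute pop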
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
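// Illustrative declarations for each kind (sketch):
//
//   void f(int);
//   void f(double); // Ovl_Overload: a distinct signature
//   void f(int);    // Ovl_Match: same signature as an existing declaration
//   // Had 'f' previously been declared as a variable ('int f;'), a later
//   // function 'f' would yield Ovl_NonFunction.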
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
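// Illustrative contexts requiring converted constant expressions (sketch):
//
//   template <int N> struct A {};
//   constexpr long K = 3;
//   A<K> a;                                 // CCEK_TemplateArg: long -> int
//   void f() { switch (0) case K: break; }  // CCEK_CaseValue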
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
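// Callers derive from ICEConvertDiagnoser and supply the diagnostics; a
// minimal sketch (the diagnostic ID is illustrative, and the remaining
// pure-virtual overrides are elided for brevity):
//
//   struct SizeDiagnoser : Sema::ICEConvertDiagnoser {
//     SizeDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_array_size_not_integral) << T;
//     }
//     // ... diagnoseIncomplete, diagnoseExplicitConv, noteExplicitConv,
//     //     diagnoseAmbiguous, noteAmbiguous, diagnoseConversion ...
//   };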
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-template candidates
// identified by the expression E.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
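// The canonical enable_if pattern these checks implement (sketch):
//
//   void f(int x) __attribute__((enable_if(x > 0, "x must be positive")));
//   void test() {
//     f(1);  // OK: the condition folds to true
//     f(-1); // error: no viable function; the failing attribute is noted
//   }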
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupNameKind enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
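// Illustrative sketch (an assumed caller-side usage, exposition only) of the
// two major entry points described above; SemaRef, Name, Loc, S, and DC are
// hypothetical locals:
//
//   LookupResult R(SemaRef, Name, Loc, Sema::LookupOrdinaryName);
//   SemaRef.LookupName(R, S);                   // unqualified lookup
//   // or: SemaRef.LookupQualifiedName(R, DC);  // qualified lookup in DC
//   if (R.isSingleResult())
//     NamedDecl *ND = R.getFoundDecl();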
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
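// Illustrative sketch (exposition only) of the operator forms these results
// refer to:
//
//   long double operator""_km(long double v);  // LOLR_Cooked
//   int operator""_r(const char *spelling);    // LOLR_Raw
//   template <char... Cs> int operator""_t();  // LOLR_Template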
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of function emission, based on the CUDA/HIP/OpenMP host/device attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
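// Illustrative sketch (exposition only) of the lookup this implements:
//
//   namespace N { struct X {}; void f(X); }
//   N::X x;
//   f(x);  // N::f is found via ADL through the associated namespace N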
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate. Returns a new Expr if all typos
/// were successfully corrected, or ExprError if one or more typos could not
/// be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
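// Illustrative sketch (an assumed call site, exposition only) of using the
// Filter callback to veto unacceptable corrections; S and E are hypothetical
// locals:
//
//   ExprResult Fixed = S.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Rebuilt) -> ExprResult {
//         if (Rebuilt->getType()->isScalarType())
//           return Rebuilt;      // accept this combination of corrections
//         return ExprError();    // reject; try the next combination
//       });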
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation's declaration exactly matches that of its interface
/// declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) that an atomic property must have either both or neither of a
/// user-declared setter and getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match,
/// returning true if they do and false otherwise.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks the methods implemented in a
/// category against those implemented in its primary class and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// CollectMultipleMethodsInGlobalPool - Returns instance or factory methods
/// in the global method pool for the given selector. It checks the desired
/// kind first; if none is found and the CheckTheOther parameter is set, it
/// then checks the other kind. If no such method or only one method is found,
/// the function returns false; otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// SelectBestMethod - Returns the method which best matches the given
/// argument list, or nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
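// Illustrative sketch (an assumed call site, exposition only): the third
// (increment) part of a for statement is passed as a FullExprArg; Actions and
// IncExpr are hypothetical locals:
//
//   Sema::FullExprArg Third = Actions.MakeFullExpr(IncExpr);
//   // later: Actions.ActOnForStmt(ForLoc, LParenLoc, First, Second, Third,
//   //                             RParenLoc, Body);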
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
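// Illustrative sketch (exposition only): entering a compound-statement scope
// for the duration of a parse; Actions is a hypothetical Sema reference:
//
//   {
//     Sema::CompoundScopeRAII CompoundScope(Actions);
//     // ... parse the statements; the scope is popped automatically ...
//   }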
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
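// Illustrative sketch (exposition only): disable() suppresses the automatic
// pop, e.g. once the scope has been popped through some other path:
//
//   Sema::FunctionScopeRAII FuncScope(S);
//   // ... early returns here still pop the scope via the destructor ...
//   FuncScope.disable();  // reached only when the pop is no longer wanted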
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its \p Body, and the two are located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
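// Illustrative sketch (an assumed call site, exposition only): the push/pop
// pair brackets the analysis of an unevaluated operand; S is a hypothetical
// Sema reference:
//
//   S.PushExpressionEvaluationContext(
//       Sema::ExpressionEvaluationContext::Unevaluated);
//   ExprResult Operand = /* ... build the operand ... */;
//   S.PopExpressionEvaluationContext();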
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
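// Illustrative sketch (an assumed call site, exposition only): a dry-run
// capture check that neither records captures nor diagnoses; S, Var, and Loc
// are hypothetical locals:
//
//   QualType CaptureType, DeclRefType;
//   bool Invalid = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);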
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
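// Illustrative sketch (exposition only) of the recovery described above:
//
//   struct Widget { int field; };
//   struct Handle { Widget *operator->(); };
//   Handle h;
//   h.field;   // no member 'field' in Handle; retried as h->field,
//              // which succeeds through Handle::operator->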
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension ("a ?: b").
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
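// Illustrative example (not in the upstream header; 'compute' is a
// hypothetical function): the GNU statement expression handled by the
// callbacks above has the source form
//   int y = ({ int x = compute(); x + 1; });
// ActOnStmtExprResult processes the final expression ('x + 1'), whose value
// becomes the value of the whole "({..})".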
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
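// Illustrative sketch (an assumption about the encoding, not upstream text):
// for __builtin_offsetof(struct S, a.b[2]) the parser produces three
// OffsetOfComponents: '.a' and '.b' with isBrackets == false and U.IdentInfo
// set, then '[2]' with isBrackets == true and U.E holding the index expression.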
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
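// Illustrative example (not upstream; 'use64'/'use32' are hypothetical): in
//   __builtin_choose_expr(sizeof(long) == 8, use64(x), use32(x))
// the condition must be a constant expression; the result has the type and
// value of the selected operand, and the other operand is never evaluated.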
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
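// Illustrative example (not upstream): these results classify the Microsoft
// __if_exists / __if_not_exists extension, e.g.
//   __if_exists(T::member) { /* compiled only if T::member exists */ }
// When 'T::member' is a dependent name inside a template, the check yields
// IER_Dependent and the branch is resolved at instantiation time.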
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
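// Illustrative example (not upstream): the block-literal callbacks above
// correspond to source such as
//   int (^inc)(int) = ^(int x) { return x + 1; };
// ActOnBlockStart fires at '^', ActOnBlockArguments after '(int x)', and
// ActOnBlockStmtExpr once the '{...}' body has been parsed.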
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
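// Illustrative usage sketch (an assumption; 'SubobjectSpecialMembers' is a
// hypothetical name): callers typically fold every subobject operation into
// the specification, e.g.
//   ImplicitExceptionSpecification Spec(*this);
//   for (const CXXMethodDecl *M : SubobjectSpecialMembers)  // hypothetical
//     Spec.CalledDecl(Loc, M);
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();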
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this returns a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
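// Illustrative example (not upstream): ActOnCXXNamedCast covers all four
// C++ named casts, e.g.
//   Derived *DP = dynamic_cast<Derived *>(BasePtr);
//   long N = static_cast<long>(SomeInt);
// with Kind identifying the cast keyword token and the angle-bracket and
// parenthesis locations retained for diagnostics.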
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
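// Illustrative example (not upstream): the unary right fold in
//   template <typename... Ts> auto sum(Ts... ts) { return (ts + ...); }
// reaches ActOnCXXFoldExpr with LHS = 'ts', Operator = '+' and a null RHS.
// BuildEmptyCXXFoldExpr handles empty packs for the operators that permit
// them: '&&' yields true, '||' yields false, and ',' yields void().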
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
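// Illustrative usage sketch (an assumption; all names hypothetical): code
// analyzing a member signature outside the class body might write
//   {
//     Sema::CXXThisScopeRAII ThisScope(SemaRef, ClassDecl, MethodQuals);
//     Result = SemaRef.ActOnDecltypeExpression(TrailingExpr);
//   } // 'this' reverts to its previous (dis)allowed state here.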
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted as function-style casting ("int(x)"),
/// class type construction ("ClassType(x,y,z)"),
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
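// Illustrative examples (not upstream) of the forms accepted above:
//   int(x)            // function-style cast
//   ClassType(a, b)   // construction of a class temporary
//   int()             // value-initialized temporary
//   ClassType{a, b}   // brace form, ListInitialization == true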
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
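// Illustrative example (not upstream; names hypothetical):
//   T *p = new (Pool) T(Args);   // PlacementArgs = {Pool}
//   U *q = new U[N]{};           // ArraySize = N, list-initialized
// BuildCXXNew then resolves the allocation function via
// FindAllocationFunctions below.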
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
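// Illustrative mapping (an assumption): '::new T' restricts lookup to
// AFS_Global, while a plain 'new T' uses AFS_Both, preferring a class-scope
// 'operator new' and falling back to the global declarations.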
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
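// Illustrative examples (not upstream) of the trait pseudo-functions:
//   __is_trivially_copyable(T)       // type trait (ActOnTypeTrait)
//   __array_rank(int[10][20])        // == 2 (ActOnArrayTypeTrait)
//   __array_extent(int[10][20], 1)   // == 20; DimExpr picks the dimension
//   __is_lvalue_expr(x)              // expression trait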
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
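// Illustrative example (not upstream): a pseudo-destructor call on a
// non-class type,
//   typedef int I;
//   void f(I *p) { p->~I(); }  // a no-op, but required by generic code
// is handled by the ActOnPseudoDestructorExpr overloads above.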
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit error messages.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and apply any
/// implicit conversions, such as an lvalue-to-rvalue conversion when the
/// capture is not used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
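// Illustrative example (not upstream; 'makeWidget' is hypothetical): the
// C++14 init-capture
//   auto l = [x = makeWidget()] { return x.size(); };
// is copy-initialization, so DirectInit is false below; the forms
// '[x(makeWidget())]' and '[x{makeWidget()}]' are treated as direct
// initialization instead.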
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
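// Illustrative example (not upstream): the C++2a explicit template parameter
// list on a lambda,
//   auto cmp = []<typename T>(T a, T b) { return a < b; };
// is reported here with TParams = {T}, delimited by LAngleLoc and RAngleLoc.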
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, and false is returned.
bool CheckConstraintExpression(Expr *CE);
bool CalculateConstraintSatisfaction(ConceptDecl *NamedConcept,
MultiLevelTemplateArgumentList &MLTAL,
Expr *ConstraintExpr,
bool &IsSatisfied);
/// Check that the associated constraints of a template declaration match the
/// associated constraints of an older declaration of which it is a
/// redeclaration.
bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old,
TemplateParameterList *New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
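// Illustrative example (not upstream) of the initializer kinds built above:
//   struct D : B {
//     int m;
//     D(int v) : B(v), m(v) {}  // base and member initializers
//     D() : D(0) {}             // delegating initializer
//   };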
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
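///
/// For example:
/// \code
/// struct Base { virtual Base *clone(); };
/// struct Derived : Base {
///   Derived *clone() override; // OK: covariant return type
/// };
/// \endcode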
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
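///
/// For example:
/// \code
/// struct Base { virtual void f() noexcept; };
/// struct Derived : Base {
///   void f() override; // error: potentially-throwing overrider of a
///                      // non-throwing virtual function
/// };
/// \endcode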
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
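///
/// For example:
/// \code
/// struct Base { virtual void f() final; };
/// struct Derived : Base {
///   void f() override; // error: overrides a 'final' function
/// };
/// \endcode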
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
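/// For example, under the C++2a rules for calls written with explicit
/// template arguments:
/// \code
/// void call() {
///   g<int>(0); // unqualified lookup finds nothing, so 'g' is assumed to
///              // name a template (FoundNothing) and is found by ADL
/// }
/// \endcode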
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
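///
/// For example, \c wrapper is a deduction-guide name in the second
/// declaration below (an illustrative type):
/// \code
/// template<typename T> struct wrapper { wrapper(T); };
/// template<typename T> wrapper(T) -> wrapper<T>;
/// \endcode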
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
SourceLocation ConceptNameLoc, NamedDecl *FoundDecl,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
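/// For example, \c CTAK_DeducedFromArrayBound applies to \c N when deducing
/// from a call to:
/// \code
/// template<typename T, unsigned N> void f(T (&array)[N]);
/// \endcode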
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
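///
/// For example:
/// \code
/// template<typename ...Ts> void f(Ts ...args);
/// template<typename ...Ts> void g(Ts ...args) {
///   f(args); // error: unexpanded parameter pack 'args' (UPPC_Expression)
/// }
/// \endcode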
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
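///
/// For example, when expanding \c f(args)... where \c args was bound to
/// three arguments, \c ShouldExpand is set to \c true and \c NumExpansions
/// to 3.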
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// to determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
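///
/// For example, if \p Arg has already been substituted with a pack of three
/// elements, the result is 3 even though the pattern itself is not expanded.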
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
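/// A minimal sketch of consuming these results (variable names are
/// illustrative):
/// \code
/// sema::TemplateDeductionInfo Info(PointOfInstantiation);
/// if (S.DeduceTemplateArguments(Partial, TemplateArgs, Info) != TDK_Success) {
///   // Deduction failed; Info describes the parameters and arguments
///   // involved in the failure.
/// }
/// \endcode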
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
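/// For example, substituting \c int for \c auto in \c auto& yields \c int&.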
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
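/// Deduce the type of a declaration written with \c auto from its
/// initializer, e.g. deducing \c int for \c x in:
/// \code
/// auto x = 0;
/// \endcode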
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
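///
/// For example, a caller expanding a pack into \c N separate arguments might
/// do (a sketch; \c SemaRef and \c NumExpansions are illustrative):
/// \code
/// for (unsigned I = 0; I != NumExpansions; ++I) {
///   ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
///   // ... transform the pattern; each pack is substituted with its
///   // I'th argument ...
/// }
/// \endcode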
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns \c true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, TemplateDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
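// A minimal usage sketch (hypothetical call site; 'SemaRef', 'PointOfInst',
// and 'Spec' are assumed locals, not a verbatim excerpt):
//
//   InstantiatingTemplate Inst(SemaRef, PointOfInst, Spec);
//   if (Inst.isInvalid())
//     return true;  // instantiation depth exceeded; an error was emitted
//   if (Inst.isAlreadyInstantiating())
//     return false; // recursive instantiation of the same specialization
//   // ... perform the instantiation; the destructor pops the context.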
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
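// Typical query pattern (sketch; 'Info' is an assumed local):
//
//   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
//     // In a SFINAE context. If *Info is non-null, suppressed diagnostics
//     // can be captured into the deduction-info object it points to.
//   }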
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
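// A minimal usage sketch (hypothetical; 'SemaRef', 'E', and 'TemplateArgs'
// are assumed locals):
//
//   SFINAETrap Trap(SemaRef);
//   ExprResult Result = SemaRef.SubstExpr(E, TemplateArgs);
//   if (Trap.hasErrorOccurred())
//     ; // treat as a substitution failure rather than a hard error
//
// The destructor restores the previous SFINAE error count, so trapped
// errors do not leak out of the deduction attempt.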
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
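// Intended pattern (sketch): wrap the provisional analysis in a scope so
// typo correction and immediate-context diagnostics stay suppressed.
//
//   {
//     TentativeAnalysisScope Tentative(SemaRef);
//     // ... analyze the construct provisionally ...
//   } // previous typo-correction state is restored here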
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
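// Sketch of the save/perform/restore protocol (hypothetical call site):
//
//   {
//     GlobalEagerInstantiationScope GlobalInst(S, /*Enabled=*/true);
//     // ... work that enqueues instantiations and vtable uses ...
//     GlobalInst.perform(); // define used vtables, drain pending queue
//   } // destructor swaps the saved (pre-existing) queues back in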
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
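// The local variant follows the same protocol but only drains
// function-local instantiations (sketch):
//
//   LocalEagerInstantiationScope LocalInst(S);
//   // ... instantiate a function body ...
//   LocalInst.perform(); // PerformPendingInstantiations(/*LocalOnly=*/true)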
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
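// Usage sketch (hypothetical; 'Info', 'EPI', and 'NumParams' are assumed
// locals). Indices must be set in increasing order, and getPointerOrNull()
// returns nullptr when no parameter carried interesting info:
//
//   ExtParameterInfoBuilder Builder;
//   Builder.set(/*index=*/1, Info);
//   EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);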
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
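// Call sketch (hypothetical; 'Args' and 'TemplateArgs' are assumed locals):
//
//   SmallVector<Expr *, 8> Outputs;
//   if (SubstExprs(Args, /*IsCall=*/true, TemplateArgs, Outputs))
//     return ExprError(); // an error occurred during substitution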
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on well-formed '\#pragma clang __debug dump II'.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed '\#pragma GCC visibility...'.
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS.
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
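// Query sketch (hypothetical call site):
//
//   if (getOptimizeOffPragmaLocation().isValid())
//     ; // a '#pragma clang optimize off' region is currently active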
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in the appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for OpenMP
/// devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Struct to store the context selectors info for declare variant directive.
struct OpenMPDeclareVariantCtsSelectorData {
OMPDeclareVariantAttr::CtxSelectorSetType CtxSet =
OMPDeclareVariantAttr::CtxSetUnknown;
OMPDeclareVariantAttr::CtxSelectorType Ctx =
OMPDeclareVariantAttr::CtxUnknown;
MutableArrayRef<StringRef> ImplVendors;
ExprResult CtxScore;
explicit OpenMPDeclareVariantCtsSelectorData() = default;
explicit OpenMPDeclareVariantCtsSelectorData(
OMPDeclareVariantAttr::CtxSelectorSetType CtxSet,
OMPDeclareVariantAttr::CtxSelectorType Ctx,
MutableArrayRef<StringRef> ImplVendors, ExprResult CtxScore)
: CtxSet(CtxSet), Ctx(Ctx), ImplVendors(ImplVendors),
CtxScore(CtxScore) {}
};
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the OpenMP 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of a declare target region, i.e.
/// '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e.
/// '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \returns None if the function and its variant are not compatible with
/// the pragma; otherwise, a pair of the original function and the variant
/// reference expression.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param Data Set of context-specific data for the specified context
/// selector.
void ActOnOpenMPDeclareVariantDirective(
FunctionDecl *FD, Expr *VariantRef, SourceRange SR,
const Sema::OpenMPDeclareVariantCtsSelectorData &Data);
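/// Generic handler for OpenMP clauses that take a single expression
/// argument; dispatches to the clause-specific handlers below.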
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
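/// Generic handler for OpenMP clauses that take a simple enumeration
/// argument.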
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
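/// Generic handler for OpenMP clauses that take an expression plus
/// additional arguments.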
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
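/// Generic handler for OpenMP clauses without any argument.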
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
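/// Generic handler for OpenMP clauses that take a list of variables.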
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
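/// A minimal illustrative sketch, assuming a hypothetical expression \c E
/// that should be implicitly converted to 'int':
///
///  // Insert (or merge into) an implicit integral cast.
///  ExprResult Res = ImpCastExprToType(E, Context.IntTy, CK_IntegralCast);
///  if (Res.isInvalid())
///    return ExprError();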
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used by DefaultVariadicArgumentPromotion to emit the right warning.
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
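/// Illustrative sketch of the usual call pattern inside a binary-operator
/// checker (hypothetical operands \c LHS and \c RHS):
///
///  QualType CompTy = UsualArithmeticConversions(LHS, RHS);
///  if (LHS.isInvalid() || RHS.isInvalid())
///    return QualType();
///  // A null CompTy means the operands were not both arithmetic.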
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
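/// Illustrative sketch of how this typically combines with
/// DiagnoseAssignmentResult (hypothetical \c LHSType, \c RHS, and \c Loc):
///
///  ExprResult RHSRes = RHS;
///  AssignConvertType ConvTy =
///      CheckSingleAssignmentConstraints(LHSType, RHSRes);
///  if (RHSRes.isInvalid() ||
///      DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHS->getType(),
///                               RHSRes.get(), AA_Assigning))
///    return ExprError();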
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking for binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking for declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// Type checking for C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
bool &DerivedToBase, bool &ObjCConversion,
bool &ObjCLifetimeConversion,
bool &FunctionConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression of unknown type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expression.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
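/// Illustrative sketch, assuming a hypothetical condition expression
/// \c CondExpr of an 'if' statement at \c IfLoc:
///
///  ConditionResult Cond = ActOnCondition(getCurScope(), IfLoc, CondExpr,
///                                        ConditionKind::Boolean);
///  if (Cond.isInvalid())
///    return StmtError();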
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \returns The converted expression, or ExprError() if there were errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns an invalid ExprResult if the
/// conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns the converted expression
/// on success, or ExprError() on failure.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
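/// Illustrative sketch, assuming a hypothetical expression \c E that must be
/// an ICE (e.g. a case label or bit-field width):
///
///  llvm::APSInt Value;
///  ExprResult ICE = VerifyIntegerConstantExpression(E, &Value);
///  if (ICE.isInvalid())
///    return ExprError();
///  // On success, Value holds the evaluated constant.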
/// VerifyBitField - verifies that a bit-field width expression is an ICE and
/// has the correct width, and that the field type is valid.
/// Returns the width expression on success, or ExprError() on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
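// For example (based on the usual CUDA call rules; illustrative, not
// normative): when compiling for the device, a __host__ __device__ caller
// invoking a __device__ callee is CFP_SameSide, while the same caller
// invoking a __host__ callee is CFP_WrongSide; a __device__ caller invoking
// a __device__ callee is CFP_Native.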
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring the target.
/// On success, the inferred CUDA target attribute(s) are attached to the
/// member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
QualType BaseType, QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether a field shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
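// A minimal registration sketch; the identifier name and values are
// hypothetical, with S being a Sema instance:
//
//   S.RegisterTypeTagForDatatype(&S.Context.Idents.get("my_kind"),
//                                /*MagicValue=*/1, S.Context.IntTy,
//                                /*LayoutCompatible=*/false,
//                                /*MustBeNull=*/false);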
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
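// For example (illustrative values): with NumParams == 2, a completed call
// f(a, b) yields NumArgs == 2 and returns false, whereas code completion
// just after a comma in f(a, b, <cursor>) passes PartialOverloading == true,
// so the pending argument is counted and 3 > 2 returns true.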
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
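// A hedged usage sketch; SemaRef and the work done inside the scope are
// hypothetical:
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... analyze an operand of sizeof/decltype here; the context is
//     // popped automatically when Unevaluated goes out of scope.
//   }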
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
c-decl.c | /* Process declarations and variables for C compiler.
Copyright (C) 1988-2015 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* Process declarations and symbol lookup for C front end.
Also constructs types; the standard scalar types at initialization,
and structure, union, array and enum types when they are declared. */
/* ??? not all decl nodes are given the most useful possible
line numbers. For example, the CONST_DECLs for enum values. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "input.h"
#include "tm.h"
#include "intl.h"
#include "hash-set.h"
#include "vec.h"
#include "symtab.h"
#include "input.h"
#include "alias.h"
#include "double-int.h"
#include "machmode.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "print-tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "attribs.h"
#include "stringpool.h"
#include "tree-inline.h"
#include "flags.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "function.h"
#include "c-tree.h"
#include "toplev.h"
#include "tm_p.h"
#include "cpplib.h"
#include "target.h"
#include "debug.h"
#include "opts.h"
#include "timevar.h"
#include "c-family/c-common.h"
#include "c-family/c-objc.h"
#include "c-family/c-pragma.h"
#include "c-family/c-ubsan.h"
#include "c-lang.h"
#include "langhooks.h"
#include "tree-iterator.h"
#include "diagnostic-core.h"
#include "dumpfile.h"
#include "hash-map.h"
#include "is-a.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "hash-table.h"
#include "langhooks-def.h"
#include "plugin.h"
#include "c-family/c-ada-spec.h"
#include "cilk.h"
#include "builtins.h"
/* In grokdeclarator, distinguish syntactic contexts of declarators. */
enum decl_context
{ NORMAL, /* Ordinary declaration */
FUNCDEF, /* Function definition */
PARM, /* Declaration of parm before function body */
FIELD, /* Declaration inside struct or union */
TYPENAME}; /* Typename (inside cast or sizeof) */
/* States indicating how grokdeclarator() should handle declspecs marked
with __attribute__((deprecated)). An object declared as
__attribute__((deprecated)) suppresses warnings of uses of other
deprecated items. */
enum deprecated_states {
DEPRECATED_NORMAL,
DEPRECATED_SUPPRESS
};
/* Nonzero if we have seen an invalid cross reference
to a struct, union, or enum, but not yet printed the message. */
tree pending_invalid_xref;
/* File and line to appear in the eventual error message. */
location_t pending_invalid_xref_location;
/* The file and line that the prototype came from if this is an
old-style definition; used for diagnostics in
store_parm_decls_oldstyle. */
static location_t current_function_prototype_locus;
/* Whether this prototype was built-in. */
static bool current_function_prototype_built_in;
/* The argument type information of this prototype. */
static tree current_function_prototype_arg_types;
/* The argument information structure for the function currently being
defined. */
static struct c_arg_info *current_function_arg_info;
/* The obstack on which parser and related data structures, which are
not live beyond their top-level declaration or definition, are
allocated. */
struct obstack parser_obstack;
/* The current statement tree. */
static GTY(()) struct stmt_tree_s c_stmt_tree;
/* State saving variables. */
tree c_break_label;
tree c_cont_label;
/* A list of decls to be made automatically visible in each file scope. */
static GTY(()) tree visible_builtins;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
int current_function_returns_abnormally;
/* Set to nonzero by `grokdeclarator' for a function
whose return type is defaulted, if warnings for this are desired. */
static int warn_about_return_type;
/* Nonzero when the current toplevel function contains a declaration
of a nested function which is never defined. */
static bool undef_nested_function;
/* Mode used to build pointers (VOIDmode means ptr_mode). */
machine_mode c_default_pointer_mode = VOIDmode;
/* If non-zero, an implicit "omp declare target" attribute is added to the
attribute lists. */
int current_omp_declare_target_attribute;
/* Each c_binding structure describes one binding of an identifier to
a decl. All the decls in a scope - irrespective of namespace - are
chained together by the ->prev field, which (as the name implies)
runs in reverse order. All the decls in a given namespace bound to
a given identifier are chained by the ->shadowed field, which runs
from inner to outer scopes.
The ->decl field usually points to a DECL node, but there are two
exceptions. In the namespace of type tags, the bound entity is a
RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node. If an undeclared
identifier is encountered, it is bound to error_mark_node to
suppress further errors about that identifier in the current
function.
The ->u.type field stores the type of the declaration in this scope;
if NULL, the type is the type of the ->decl field. This is only of
relevance for objects with external or internal linkage which may
be redeclared in inner scopes, forming composite types that only
persist for the duration of those scopes. In the external scope,
this stores the composite of all the types declared for this
object, visible or not. The ->inner_comp field (used only at file
scope) stores whether an incomplete array type at file scope was
completed at an inner scope to an array size other than 1.
The ->u.label field is used for labels. It points to a structure
which stores additional information used for warnings.
The depth field is copied from the scope structure that holds this
decl. It is used to preserve the proper ordering of the ->shadowed
field (see bind()) and also for a handful of special-case checks.
Finally, the invisible bit is true for a decl which should be
ignored for purposes of normal name lookup, and the nested bit is
true for a decl that's been bound a second time in an inner scope;
in all such cases, the binding in the outer scope will have its
invisible bit true. */
struct GTY((chain_next ("%h.prev"))) c_binding {
union GTY(()) { /* first so GTY desc can use decl */
tree GTY((tag ("0"))) type; /* the type in this scope */
struct c_label_vars * GTY((tag ("1"))) label; /* for warnings */
} GTY((desc ("TREE_CODE (%0.decl) == LABEL_DECL"))) u;
tree decl; /* the decl bound */
tree id; /* the identifier it's bound to */
struct c_binding *prev; /* the previous decl in this scope */
struct c_binding *shadowed; /* the innermost decl shadowed by this one */
unsigned int depth : 28; /* depth of this scope */
BOOL_BITFIELD invisible : 1; /* normal lookup should ignore this binding */
BOOL_BITFIELD nested : 1; /* do not set DECL_CONTEXT when popping */
BOOL_BITFIELD inner_comp : 1; /* incomplete array completed in inner scope */
BOOL_BITFIELD in_struct : 1; /* currently defined as struct field */
location_t locus; /* location for nested bindings */
};
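/* For instance (illustrative C, not part of this file):

int i;                    // bound in file_scope
void f (void) { int i; }  // inner binding in the function's scope

The inner binding's ->shadowed field points at the file-scope binding,
and lookups see the inner one first because bindings chain from inner
to outer along ->shadowed. */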
#define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth)
#define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth)
#define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/)
#define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/)
/* Each C symbol points to three linked lists of c_binding structures.
These describe the values of the identifier in the three different
namespaces defined by the language. */
struct GTY(()) lang_identifier {
struct c_common_identifier common_id;
struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */
struct c_binding *tag_binding; /* struct/union/enum tags */
struct c_binding *label_binding; /* labels */
};
/* Validate c-lang.c's assumptions. */
extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate
[(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1];
/* The binding oracle; see c-tree.h. */
void (*c_binding_oracle) (enum c_oracle_request, tree identifier);
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's symbol binding. */
#define I_SYMBOL_CHECKED(node) \
(TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (node)))
static inline struct c_binding **
i_symbol_binding (tree node)
{
struct lang_identifier *lid
= (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);
if (lid->symbol_binding == NULL
&& c_binding_oracle != NULL
&& !I_SYMBOL_CHECKED (node))
{
/* Set the "checked" flag first, to avoid infinite recursion
when the binding oracle calls back into gcc. */
I_SYMBOL_CHECKED (node) = 1;
c_binding_oracle (C_ORACLE_SYMBOL, node);
}
return &lid->symbol_binding;
}
#define I_SYMBOL_BINDING(node) (*i_symbol_binding (node))
#define I_SYMBOL_DECL(node) \
(I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0)
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's tag binding. */
#define I_TAG_CHECKED(node) \
(TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (node)))
static inline struct c_binding **
i_tag_binding (tree node)
{
struct lang_identifier *lid
= (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);
if (lid->tag_binding == NULL
&& c_binding_oracle != NULL
&& !I_TAG_CHECKED (node))
{
/* Set the "checked" flag first, to avoid infinite recursion
when the binding oracle calls back into gcc. */
I_TAG_CHECKED (node) = 1;
c_binding_oracle (C_ORACLE_TAG, node);
}
return &lid->tag_binding;
}
#define I_TAG_BINDING(node) (*i_tag_binding (node))
#define I_TAG_DECL(node) \
(I_TAG_BINDING(node) ? I_TAG_BINDING(node)->decl : 0)
/* This flag is set on an identifier if we have previously asked the
binding oracle for this identifier's label binding. */
#define I_LABEL_CHECKED(node) \
(TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (node)))
static inline struct c_binding **
i_label_binding (tree node)
{
struct lang_identifier *lid
= (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node);
if (lid->label_binding == NULL
&& c_binding_oracle != NULL
&& !I_LABEL_CHECKED (node))
{
/* Set the "checked" flag first, to avoid infinite recursion
when the binding oracle calls back into gcc. */
I_LABEL_CHECKED (node) = 1;
c_binding_oracle (C_ORACLE_LABEL, node);
}
return &lid->label_binding;
}
#define I_LABEL_BINDING(node) (*i_label_binding (node))
#define I_LABEL_DECL(node) \
(I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0)
/* The resulting tree type. */
union GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"),
chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node
{
union tree_node GTY ((tag ("0"),
desc ("tree_node_structure (&%h)")))
generic;
struct lang_identifier GTY ((tag ("1"))) identifier;
};
/* Track bindings and other things that matter for goto warnings. For
efficiency, we do not gather all the decls at the point of
definition. Instead, we point into the bindings structure. As
scopes are popped, we update these structures and gather the decls
that matter at that time. */
struct GTY(()) c_spot_bindings {
/* The currently open scope which holds bindings defined when the
label was defined or the goto statement was found. */
struct c_scope *scope;
/* The bindings in the scope field which were defined at the point
of the label or goto. This lets us look at older or newer
bindings in the scope, as appropriate. */
struct c_binding *bindings_in_scope;
/* The number of statement expressions that have started since this
label or goto statement was defined. This is zero if we are at
the same statement expression level. It is positive if we are in
a statement expression started since this spot. It is negative
if this spot was in a statement expression and we have left
it. */
int stmt_exprs;
/* Whether we started in a statement expression but are no longer in
it. This is set to true if stmt_exprs ever goes negative. */
bool left_stmt_expr;
};
/* This structure is used to keep track of bindings seen when a goto
statement is defined. This is only used if we see the goto
statement before we see the label. */
struct GTY(()) c_goto_bindings {
/* The location of the goto statement. */
location_t loc;
/* The bindings of the goto statement. */
struct c_spot_bindings goto_bindings;
};
typedef struct c_goto_bindings *c_goto_bindings_p;
/* The additional information we keep track of for a label binding.
These fields are updated as scopes are popped. */
struct GTY(()) c_label_vars {
/* The shadowed c_label_vars, when one label shadows another (which
can only happen using a __label__ declaration). */
struct c_label_vars *shadowed;
/* The bindings when the label was defined. */
struct c_spot_bindings label_bindings;
/* A list of decls that we care about: decls about which we should
warn if a goto branches to this label from later in the function.
Decls are added to this list as scopes are popped. We only add
the decls that matter. */
vec<tree, va_gc> *decls_in_scope;
/* A list of goto statements to this label. This is only used for
goto statements seen before the label was defined, so that we can
issue appropriate warnings for them. */
vec<c_goto_bindings_p, va_gc> *gotos;
};
/* Each c_scope structure describes the complete contents of one
scope. Four scopes are distinguished specially: the innermost or
current scope, the innermost function scope, the file scope (always
the second to outermost) and the outermost or external scope.
Most declarations are recorded in the current scope.
All normal label declarations are recorded in the innermost
function scope, as are bindings of undeclared identifiers to
error_mark_node. (GCC permits nested functions as an extension,
hence the 'innermost' qualifier.) Explicitly declared labels
(using the __label__ extension) appear in the current scope.
Being in the file scope (current_scope == file_scope) causes
special behavior in several places below. Also, under some
conditions the Objective-C front end records declarations in the
file scope even though that isn't the current scope.
All declarations with external linkage are recorded in the external
scope, even if they aren't visible there; this models the fact that
such declarations are visible to the entire program, and (with a
bit of cleverness, see pushdecl) allows diagnosis of some violations
of C99 6.2.2p7 and 6.2.7p2:
If, within the same translation unit, the same identifier appears
with both internal and external linkage, the behavior is
undefined.
All declarations that refer to the same object or function shall
have compatible type; otherwise, the behavior is undefined.
Initially only the built-in declarations, which describe compiler
intrinsic functions plus a subset of the standard library, are in
this scope.
The order of the blocks list matters, and it is frequently appended
to. To avoid having to walk all the way to the end of the list on
each insertion, or reverse the list later, we maintain a pointer to
the last list entry. (FIXME: It should be feasible to use a reversed
list here.)
The bindings list is strictly in reverse order of declarations;
pop_scope relies on this. */
struct GTY((chain_next ("%h.outer"))) c_scope {
/* The scope containing this one. */
struct c_scope *outer;
/* The next outermost function scope. */
struct c_scope *outer_function;
/* All bindings in this scope. */
struct c_binding *bindings;
/* For each scope (except the global one), a chain of BLOCK nodes
for all the scopes that were entered and exited one level down. */
tree blocks;
tree blocks_last;
/* The depth of this scope. Used to keep the ->shadowed chain of
bindings sorted innermost to outermost. */
unsigned int depth : 28;
/* True if we are currently filling this scope with parameter
declarations. */
BOOL_BITFIELD parm_flag : 1;
/* True if we saw [*] in this scope. Used to give an error message
if it appears in a function definition. */
BOOL_BITFIELD had_vla_unspec : 1;
/* True if we already complained about forward parameter decls
in this scope. This prevents double warnings on
foo (int a; int b; ...) */
BOOL_BITFIELD warned_forward_parm_decls : 1;
/* True if this is the outermost block scope of a function body.
This scope contains the parameters, the local variables declared
in the outermost block, and all the labels (except those in
nested functions, or declared at block scope with __label__). */
BOOL_BITFIELD function_body : 1;
/* True means make a BLOCK for this scope no matter what. */
BOOL_BITFIELD keep : 1;
/* True means that an unsuffixed float constant is _Decimal64. */
BOOL_BITFIELD float_const_decimal64 : 1;
/* True if this scope has any label bindings. This is used to speed
up searching for labels when popping scopes, particularly since
labels are normally only found at function scope. */
BOOL_BITFIELD has_label_bindings : 1;
/* True if we should issue a warning if a goto statement crosses any
of the bindings. We still need to check the list of bindings to
find the specific ones we need to warn about. This is true if
decl_jump_unsafe would return true for any of the bindings. This
is used to avoid looping over all the bindings unnecessarily. */
BOOL_BITFIELD has_jump_unsafe_decl : 1;
};
/* The scope currently in effect. */
static GTY(()) struct c_scope *current_scope;
/* The innermost function scope. Ordinary (not explicitly declared)
labels, bindings to error_mark_node, and the lazily-created
bindings of __func__ and its friends get this scope. */
static GTY(()) struct c_scope *current_function_scope;
/* The C file scope. This is reset for each input translation unit. */
static GTY(()) struct c_scope *file_scope;
/* The outermost scope. This is used for all declarations with
external linkage, and only these, hence the name. */
static GTY(()) struct c_scope *external_scope;
/* A chain of c_scope structures awaiting reuse. */
static GTY((deletable)) struct c_scope *scope_freelist;
/* A chain of c_binding structures awaiting reuse. */
static GTY((deletable)) struct c_binding *binding_freelist;
/* Append DECL to LIST in scope SCOPE. */
#define SCOPE_LIST_APPEND(scope, list, decl) do { \
struct c_scope *s_ = (scope); \
tree d_ = (decl); \
if (s_->list##_last) \
BLOCK_CHAIN (s_->list##_last) = d_; \
else \
s_->list = d_; \
s_->list##_last = d_; \
} while (0)
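/* For example, SCOPE_LIST_APPEND (scope, blocks, block) token-pastes the
list name to reach the cached tail pointer: it links BLOCK onto
scope->blocks via BLOCK_CHAIN and updates scope->blocks_last, so each
append is O(1) instead of walking the list. */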
/* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE. */
#define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do { \
struct c_scope *t_ = (tscope); \
struct c_scope *f_ = (fscope); \
if (t_->to##_last) \
BLOCK_CHAIN (t_->to##_last) = f_->from; \
else \
t_->to = f_->from; \
t_->to##_last = f_->from##_last; \
} while (0)
/* A c_inline_static structure stores details of a static identifier
referenced in a definition of a function that may be an inline
definition, i.e. one where no subsequent declaration of that function
uses "extern" or omits "inline". */
struct GTY((chain_next ("%h.next"))) c_inline_static {
/* The location for a diagnostic. */
location_t location;
/* The function that may be an inline definition. */
tree function;
/* The object or function referenced. */
tree static_decl;
/* What sort of reference this is. */
enum c_inline_static_type type;
/* The next such structure or NULL. */
struct c_inline_static *next;
};
/* List of static identifiers used or referenced in functions that may
be inline definitions. */
static GTY(()) struct c_inline_static *c_inline_statics;
/* True means unconditionally make a BLOCK for the next scope pushed. */
static bool keep_next_level_flag;
/* True means the next call to push_scope will be the outermost scope
of a function body, so do not push a new scope, merely cease
expecting parameter decls. */
static bool next_is_function_body;
/* A vector of pointers to c_binding structures. */
typedef struct c_binding *c_binding_ptr;
/* Information that we keep for a struct or union while it is being
parsed. */
struct c_struct_parse_info
{
/* If warn_cxx_compat, a list of types defined within this
struct. */
vec<tree> struct_types;
/* If warn_cxx_compat, a list of field names which have bindings,
and which are defined in this struct, but which are not defined
in any enclosing struct. This is used to clear the in_struct
field of the c_bindings structure. */
vec<c_binding_ptr> fields;
/* If warn_cxx_compat, a list of typedef names used when defining
fields in this struct. */
vec<tree> typedefs_seen;
};
/* Information for the struct or union currently being parsed, or
NULL if not parsing a struct or union. */
static struct c_struct_parse_info *struct_parse_info;
/* Forward declarations. */
static tree lookup_name_in_scope (tree, struct c_scope *);
static tree c_make_fname_decl (location_t, tree, int);
static tree grokdeclarator (const struct c_declarator *,
struct c_declspecs *,
enum decl_context, bool, tree *, tree *, tree *,
bool *, enum deprecated_states);
static tree grokparms (struct c_arg_info *, bool);
static void layout_array_type (tree);
static void warn_defaults_to (location_t, int, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
/* T is a statement. Add it to the statement-tree. This is the
C/ObjC version--C++ has a slightly different version of this
function. */
tree
add_stmt (tree t)
{
enum tree_code code = TREE_CODE (t);
if (CAN_HAVE_LOCATION_P (t) && code != LABEL_EXPR)
{
if (!EXPR_HAS_LOCATION (t))
SET_EXPR_LOCATION (t, input_location);
}
if (code == LABEL_EXPR || code == CASE_LABEL_EXPR)
STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1;
/* Add T to the statement-tree. Non-side-effect statements need to be
recorded during statement expressions. */
if (!building_stmt_list_p ())
push_stmt_list ();
append_to_statement_list_force (t, &cur_stmt_list);
return t;
}
/* Build a pointer type using the default pointer mode. */
static tree
c_build_pointer_type (tree to_type)
{
addr_space_t as = to_type == error_mark_node? ADDR_SPACE_GENERIC
: TYPE_ADDR_SPACE (to_type);
machine_mode pointer_mode;
if (as != ADDR_SPACE_GENERIC || c_default_pointer_mode == VOIDmode)
pointer_mode = targetm.addr_space.pointer_mode (as);
else
pointer_mode = c_default_pointer_mode;
return build_pointer_type_for_mode (to_type, pointer_mode, false);
}
/* Return true if we will want to say something if a goto statement
crosses DECL. */
static bool
decl_jump_unsafe (tree decl)
{
if (error_operand_p (decl))
return false;
/* Always warn about crossing variably modified types. */
if ((TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == TYPE_DECL)
&& variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
return true;
/* Otherwise, only warn if -Wjump-misses-init and this is an
initialized automatic decl. */
if (warn_jump_misses_init
&& TREE_CODE (decl) == VAR_DECL
&& !TREE_STATIC (decl)
&& DECL_INITIAL (decl) != NULL_TREE)
return true;
return false;
}
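/* Illustrative C snippet (not part of this file) showing both cases,
where compute is a hypothetical helper:

goto skip;
int n = compute ();  // initialized automatic: warns under
                     // -Wjump-misses-init
char buf[n];         // variably modified type: always warns
skip:;
*/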
void
c_print_identifier (FILE *file, tree node, int indent)
{
void (*save) (enum c_oracle_request, tree identifier);
/* Temporarily hide any binding oracle. Without this, calls to
debug_tree from the debugger will end up calling into the oracle,
making for a confusing debug session. As the oracle isn't needed
here for normal operation, it's simplest to suppress it. */
save = c_binding_oracle;
c_binding_oracle = NULL;
print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4);
print_node (file, "tag", I_TAG_DECL (node), indent + 4);
print_node (file, "label", I_LABEL_DECL (node), indent + 4);
if (C_IS_RESERVED_WORD (node) && C_RID_CODE (node) != RID_CXX_COMPAT_WARN)
{
tree rid = ridpointers[C_RID_CODE (node)];
indent_to (file, indent + 4);
fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"",
(void *) rid, IDENTIFIER_POINTER (rid));
}
c_binding_oracle = save;
}
/* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL,
which may be any of several kinds of DECL or TYPE or error_mark_node,
in the scope SCOPE. */
static void
bind (tree name, tree decl, struct c_scope *scope, bool invisible,
bool nested, location_t locus)
{
struct c_binding *b, **here;
if (binding_freelist)
{
b = binding_freelist;
binding_freelist = b->prev;
}
else
b = ggc_alloc<c_binding> ();
b->shadowed = 0;
b->decl = decl;
b->id = name;
b->depth = scope->depth;
b->invisible = invisible;
b->nested = nested;
b->inner_comp = 0;
b->in_struct = 0;
b->locus = locus;
b->u.type = NULL;
b->prev = scope->bindings;
scope->bindings = b;
if (decl_jump_unsafe (decl))
scope->has_jump_unsafe_decl = 1;
if (!name)
return;
switch (TREE_CODE (decl))
{
case LABEL_DECL: here = &I_LABEL_BINDING (name); break;
case ENUMERAL_TYPE:
case UNION_TYPE:
case RECORD_TYPE: here = &I_TAG_BINDING (name); break;
case VAR_DECL:
case FUNCTION_DECL:
case TYPE_DECL:
case CONST_DECL:
case PARM_DECL:
case ERROR_MARK: here = &I_SYMBOL_BINDING (name); break;
default:
gcc_unreachable ();
}
/* Locate the appropriate place in the chain of shadowed decls
to insert this binding. Normally, scope == current_scope and
this does nothing. */
while (*here && (*here)->depth > scope->depth)
here = &(*here)->shadowed;
b->shadowed = *here;
*here = b;
}
/* Clear the binding structure B, stick it on the binding_freelist,
and return the former value of b->prev. This is used by pop_scope
and get_parm_info to iterate destructively over all the bindings
from a given scope. */
static struct c_binding *
free_binding_and_advance (struct c_binding *b)
{
struct c_binding *prev = b->prev;
memset (b, 0, sizeof (struct c_binding));
b->prev = binding_freelist;
binding_freelist = b;
return prev;
}
/* Bind a label. Like bind, but skip fields which aren't used for
labels, and add the LABEL_VARS value. */
static void
bind_label (tree name, tree label, struct c_scope *scope,
struct c_label_vars *label_vars)
{
struct c_binding *b;
bind (name, label, scope, /*invisible=*/false, /*nested=*/false,
UNKNOWN_LOCATION);
scope->has_label_bindings = true;
b = scope->bindings;
gcc_assert (b->decl == label);
label_vars->shadowed = b->u.label;
b->u.label = label_vars;
}
/* Hook called at end of compilation to assume 1 elt
for a file-scope tentative array defn that wasn't complete before. */
void
c_finish_incomplete_decl (tree decl)
{
if (TREE_CODE (decl) == VAR_DECL)
{
tree type = TREE_TYPE (decl);
if (type != error_mark_node
&& TREE_CODE (type) == ARRAY_TYPE
&& !DECL_EXTERNAL (decl)
&& TYPE_DOMAIN (type) == 0)
{
warning_at (DECL_SOURCE_LOCATION (decl),
0, "array %q+D assumed to have one element", decl);
complete_array_type (&TREE_TYPE (decl), NULL_TREE, true);
relayout_decl (decl);
}
}
}
/* Record that inline function FUNC contains a reference (location
LOC) to static DECL (file-scope or function-local according to
TYPE). */
void
record_inline_static (location_t loc, tree func, tree decl,
enum c_inline_static_type type)
{
c_inline_static *csi = ggc_alloc<c_inline_static> ();
csi->location = loc;
csi->function = func;
csi->static_decl = decl;
csi->type = type;
csi->next = c_inline_statics;
c_inline_statics = csi;
}
/* Check for references to static declarations in inline functions at
the end of the translation unit and diagnose them if the functions
are still inline definitions. */
static void
check_inline_statics (void)
{
struct c_inline_static *csi;
for (csi = c_inline_statics; csi; csi = csi->next)
{
if (DECL_EXTERNAL (csi->function))
switch (csi->type)
{
case csi_internal:
pedwarn (csi->location, 0,
"%qD is static but used in inline function %qD "
"which is not static", csi->static_decl, csi->function);
break;
case csi_modifiable:
pedwarn (csi->location, 0,
"%q+D is static but declared in inline function %qD "
"which is not static", csi->static_decl, csi->function);
break;
default:
gcc_unreachable ();
}
}
c_inline_statics = NULL;
}
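/* Sketch of a translation unit that reaches the csi_internal pedwarn
   above when compiled with C99 inline semantics (no -fgnu89-inline, no
   gnu_inline attribute), since the function is still an inline
   definition at end of TU:

       static int counter;
       inline int bump (void) { return counter++; }
       // 'counter' is static but used in inline function 'bump'
       // which is not static
*/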
/* Fill in a c_spot_bindings structure. If DEFINING is true, set it
for the current state, otherwise set it to uninitialized. */
static void
set_spot_bindings (struct c_spot_bindings *p, bool defining)
{
if (defining)
{
p->scope = current_scope;
p->bindings_in_scope = current_scope->bindings;
}
else
{
p->scope = NULL;
p->bindings_in_scope = NULL;
}
p->stmt_exprs = 0;
p->left_stmt_expr = false;
}
/* Update spot bindings P as we pop out of SCOPE. Return true if we
should push decls for a label. */
static bool
update_spot_bindings (struct c_scope *scope, struct c_spot_bindings *p)
{
if (p->scope != scope)
{
/* This label or goto is defined in some other scope, or it is a
label which is not yet defined. There is nothing to
update. */
return false;
}
/* Adjust the spot bindings to refer to the bindings already defined
in the enclosing scope. */
p->scope = scope->outer;
p->bindings_in_scope = p->scope->bindings;
return true;
}
/* The Objective-C front-end often needs to determine the current scope. */
void *
objc_get_current_scope (void)
{
return current_scope;
}
/* The following function is used only by Objective-C. It needs to live here
because it accesses the innards of c_scope. */
void
objc_mark_locals_volatile (void *enclosing_blk)
{
struct c_scope *scope;
struct c_binding *b;
for (scope = current_scope;
scope && scope != enclosing_blk;
scope = scope->outer)
{
for (b = scope->bindings; b; b = b->prev)
objc_volatilize_decl (b->decl);
/* Do not climb up past the current function. */
if (scope->function_body)
break;
}
}
/* Return true if we are in the global binding level. */
bool
global_bindings_p (void)
{
return current_scope == file_scope;
}
void
keep_next_level (void)
{
keep_next_level_flag = true;
}
/* Set the flag for the FLOAT_CONST_DECIMAL64 pragma being ON. */
void
set_float_const_decimal64 (void)
{
current_scope->float_const_decimal64 = true;
}
/* Clear the flag for the FLOAT_CONST_DECIMAL64 pragma. */
void
clear_float_const_decimal64 (void)
{
current_scope->float_const_decimal64 = false;
}
/* Return nonzero if an unsuffixed float constant is _Decimal64. */
bool
float_const_decimal64_p (void)
{
return current_scope->float_const_decimal64;
}
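/* The flag read above implements the STDC FLOAT_CONST_DECIMAL64 pragma
   from TR 24732. A sketch of its user-visible effect:

       #pragma STDC FLOAT_CONST_DECIMAL64 ON
       // an unsuffixed constant such as 1.5 now has type _Decimal64
       #pragma STDC FLOAT_CONST_DECIMAL64 OFF
       // back to the default: 1.5 has type double
*/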
/* Identify this scope as currently being filled with parameters. */
void
declare_parm_level (void)
{
current_scope->parm_flag = true;
}
void
push_scope (void)
{
if (next_is_function_body)
{
/* This is the transition from the parameters to the top level
of the function body. These are the same scope
(C99 6.2.1p4,6) so we do not push another scope structure.
next_is_function_body is set only by store_parm_decls, which
in turn is called when and only when we are about to
encounter the opening curly brace for the function body.
The outermost block of a function always gets a BLOCK node,
because the debugging output routines expect that each
function has at least one BLOCK. */
current_scope->parm_flag = false;
current_scope->function_body = true;
current_scope->keep = true;
current_scope->outer_function = current_function_scope;
current_function_scope = current_scope;
keep_next_level_flag = false;
next_is_function_body = false;
/* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */
if (current_scope->outer)
current_scope->float_const_decimal64
= current_scope->outer->float_const_decimal64;
else
current_scope->float_const_decimal64 = false;
}
else
{
struct c_scope *scope;
if (scope_freelist)
{
scope = scope_freelist;
scope_freelist = scope->outer;
}
else
scope = ggc_cleared_alloc<c_scope> ();
/* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */
if (current_scope)
scope->float_const_decimal64 = current_scope->float_const_decimal64;
else
scope->float_const_decimal64 = false;
scope->keep = keep_next_level_flag;
scope->outer = current_scope;
scope->depth = current_scope ? (current_scope->depth + 1) : 0;
/* Check for scope depth overflow. Unlikely (2^28 == 268,435,456) but
possible. */
if (current_scope && scope->depth == 0)
{
scope->depth--;
sorry ("GCC supports only %u nested scopes", scope->depth);
}
current_scope = scope;
keep_next_level_flag = false;
}
}
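/* A user-visible consequence of sharing the parameter scope with the
   outermost block (C99 6.2.1p4,6), sketched below: reusing a parameter
   name there is a redeclaration in the *same* scope, diagnosed as a
   PARM_DECL/VAR_DECL mismatch rather than treated as shadowing:

       void f (int x)
       {
         int x;  // error: 'x' redeclared as different kind of symbol
       }
*/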
/* This is called when we are leaving SCOPE. For each label defined
in SCOPE, add any appropriate decls to its decls_in_scope fields.
These are the decls whose initialization will be skipped by a goto
later in the function. */
static void
update_label_decls (struct c_scope *scope)
{
struct c_scope *s;
s = scope;
while (s != NULL)
{
if (s->has_label_bindings)
{
struct c_binding *b;
for (b = s->bindings; b != NULL; b = b->prev)
{
struct c_label_vars *label_vars;
struct c_binding *b1;
bool hjud;
unsigned int ix;
struct c_goto_bindings *g;
if (TREE_CODE (b->decl) != LABEL_DECL)
continue;
label_vars = b->u.label;
b1 = label_vars->label_bindings.bindings_in_scope;
if (label_vars->label_bindings.scope == NULL)
hjud = false;
else
hjud = label_vars->label_bindings.scope->has_jump_unsafe_decl;
if (update_spot_bindings (scope, &label_vars->label_bindings))
{
/* This label is defined in this scope. */
if (hjud)
{
for (; b1 != NULL; b1 = b1->prev)
{
/* A goto from later in the function to this
label will never see the initialization
of B1, if any. Save it to issue a
warning if needed. */
if (decl_jump_unsafe (b1->decl))
vec_safe_push (label_vars->decls_in_scope, b->decl);
}
}
}
/* Update the bindings of any goto statements associated
with this label. */
FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
update_spot_bindings (scope, &g->goto_bindings);
}
}
/* Don't search beyond the current function. */
if (s == current_function_scope)
break;
s = s->outer;
}
}
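/* Sketch of the kind of code this bookkeeping exists to diagnose: a
   jump that bypasses the declaration of a variably modified object
   (decl_jump_unsafe); the exact wording may vary by version:

       void g (int n)
       {
         if (n)
           goto lab;
         {
           int vla[n];
         lab:
           vla[0] = 0;
         }
       }
       // error: jump into scope of identifier with variably modified type
*/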
/* Set the TYPE_CONTEXT of all of TYPE's variants to CONTEXT. */
static void
set_type_context (tree type, tree context)
{
for (type = TYPE_MAIN_VARIANT (type); type;
type = TYPE_NEXT_VARIANT (type))
TYPE_CONTEXT (type) = context;
}
/* Exit a scope. Restore the state of the identifier-decl mappings
that were in effect when this scope was entered. Return a BLOCK
node containing all the DECLs in this scope that are of interest
to debug info generation. */
tree
pop_scope (void)
{
struct c_scope *scope = current_scope;
tree block, context, p;
struct c_binding *b;
bool functionbody = scope->function_body;
bool keep = functionbody || scope->keep || scope->bindings;
update_label_decls (scope);
/* If appropriate, create a BLOCK to record the decls for the life
of this function. */
block = 0;
if (keep)
{
block = make_node (BLOCK);
BLOCK_SUBBLOCKS (block) = scope->blocks;
TREE_USED (block) = 1;
/* In each subblock, record that this is its superior. */
for (p = scope->blocks; p; p = BLOCK_CHAIN (p))
BLOCK_SUPERCONTEXT (p) = block;
BLOCK_VARS (block) = 0;
}
/* The TYPE_CONTEXTs for all of the tagged types belonging to this
scope must be set so that they point to the appropriate
construct, i.e. either to the current FUNCTION_DECL node, or
else to the BLOCK node we just constructed.
Note that for tagged types whose scope is just the formal
parameter list for some function type specification, we can't
properly set their TYPE_CONTEXTs here, because we don't have a
pointer to the appropriate FUNCTION_TYPE node readily available
to us. For those cases, the TYPE_CONTEXTs of the relevant tagged
type nodes get set in `grokdeclarator' as soon as we have created
the FUNCTION_TYPE node which will represent the "scope" for these
"parameter list local" tagged types. */
if (scope->function_body)
context = current_function_decl;
else if (scope == file_scope)
{
tree file_decl = build_translation_unit_decl (NULL_TREE);
context = file_decl;
}
else
context = block;
/* Clear all bindings in this scope. */
for (b = scope->bindings; b; b = free_binding_and_advance (b))
{
p = b->decl;
switch (TREE_CODE (p))
{
case LABEL_DECL:
/* Warnings for unused labels, errors for undefined labels. */
if (TREE_USED (p) && !DECL_INITIAL (p))
{
error ("label %q+D used but not defined", p);
DECL_INITIAL (p) = error_mark_node;
}
else
warn_for_unused_label (p);
/* Labels go in BLOCK_VARS. */
DECL_CHAIN (p) = BLOCK_VARS (block);
BLOCK_VARS (block) = p;
gcc_assert (I_LABEL_BINDING (b->id) == b);
I_LABEL_BINDING (b->id) = b->shadowed;
/* Also pop back to the shadowed label_vars. */
release_tree_vector (b->u.label->decls_in_scope);
b->u.label = b->u.label->shadowed;
break;
case ENUMERAL_TYPE:
case UNION_TYPE:
case RECORD_TYPE:
set_type_context (p, context);
/* Types may not have tag-names, in which case the type
appears in the bindings list with b->id NULL. */
if (b->id)
{
gcc_assert (I_TAG_BINDING (b->id) == b);
I_TAG_BINDING (b->id) = b->shadowed;
}
break;
case FUNCTION_DECL:
/* Propagate TREE_ADDRESSABLE from nested functions to their
containing functions. */
if (!TREE_ASM_WRITTEN (p)
&& DECL_INITIAL (p) != 0
&& TREE_ADDRESSABLE (p)
&& DECL_ABSTRACT_ORIGIN (p) != 0
&& DECL_ABSTRACT_ORIGIN (p) != p)
TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1;
if (!DECL_EXTERNAL (p)
&& !DECL_INITIAL (p)
&& scope != file_scope
&& scope != external_scope)
{
error ("nested function %q+D declared but never defined", p);
undef_nested_function = true;
}
else if (DECL_DECLARED_INLINE_P (p)
&& TREE_PUBLIC (p)
&& !DECL_INITIAL (p))
{
/* C99 6.7.4p6: "a function with external linkage... declared
with an inline function specifier ... shall also be defined
in the same translation unit." */
if (!flag_gnu89_inline
&& !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (p))
&& scope != external_scope)
pedwarn (input_location, 0,
"inline function %q+D declared but never defined", p);
DECL_EXTERNAL (p) = 1;
}
goto common_symbol;
case VAR_DECL:
/* Warnings for unused variables. */
if ((!TREE_USED (p) || !DECL_READ_P (p))
&& !TREE_NO_WARNING (p)
&& !DECL_IN_SYSTEM_HEADER (p)
&& DECL_NAME (p)
&& !DECL_ARTIFICIAL (p)
&& scope != file_scope
&& scope != external_scope)
{
if (!TREE_USED (p))
warning (OPT_Wunused_variable, "unused variable %q+D", p);
else if (DECL_CONTEXT (p) == current_function_decl)
warning_at (DECL_SOURCE_LOCATION (p),
OPT_Wunused_but_set_variable,
"variable %qD set but not used", p);
}
if (b->inner_comp)
{
error ("type of array %q+D completed incompatibly with"
" implicit initialization", p);
}
/* Fall through. */
case TYPE_DECL:
case CONST_DECL:
common_symbol:
/* All of these go in BLOCK_VARS, but only if this is the
binding in the home scope. */
if (!b->nested)
{
DECL_CHAIN (p) = BLOCK_VARS (block);
BLOCK_VARS (block) = p;
}
else if (VAR_OR_FUNCTION_DECL_P (p) && scope != file_scope)
{
/* For block local externs add a special
DECL_EXTERNAL decl for debug info generation. */
tree extp = copy_node (p);
DECL_EXTERNAL (extp) = 1;
TREE_STATIC (extp) = 0;
TREE_PUBLIC (extp) = 1;
DECL_INITIAL (extp) = NULL_TREE;
DECL_LANG_SPECIFIC (extp) = NULL;
DECL_CONTEXT (extp) = current_function_decl;
if (TREE_CODE (p) == FUNCTION_DECL)
{
DECL_RESULT (extp) = NULL_TREE;
DECL_SAVED_TREE (extp) = NULL_TREE;
DECL_STRUCT_FUNCTION (extp) = NULL;
}
if (b->locus != UNKNOWN_LOCATION)
DECL_SOURCE_LOCATION (extp) = b->locus;
DECL_CHAIN (extp) = BLOCK_VARS (block);
BLOCK_VARS (block) = extp;
}
/* If this is the file scope set DECL_CONTEXT of each decl to
the TRANSLATION_UNIT_DECL. This makes same_translation_unit_p
work. */
if (scope == file_scope)
{
DECL_CONTEXT (p) = context;
if (TREE_CODE (p) == TYPE_DECL
&& TREE_TYPE (p) != error_mark_node)
set_type_context (TREE_TYPE (p), context);
}
/* Fall through. */
/* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have
already been put there by store_parm_decls. Unused-
parameter warnings are handled by function.c.
error_mark_node obviously does not go in BLOCK_VARS and
does not get unused-variable warnings. */
case PARM_DECL:
case ERROR_MARK:
/* It is possible for a decl not to have a name. We get
here with b->id NULL in this case. */
if (b->id)
{
gcc_assert (I_SYMBOL_BINDING (b->id) == b);
I_SYMBOL_BINDING (b->id) = b->shadowed;
if (b->shadowed && b->shadowed->u.type)
TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
}
break;
default:
gcc_unreachable ();
}
}
/* Dispose of the block that we just made inside some higher level. */
if ((scope->function_body || scope == file_scope) && context)
{
DECL_INITIAL (context) = block;
BLOCK_SUPERCONTEXT (block) = context;
}
else if (scope->outer)
{
if (block)
SCOPE_LIST_APPEND (scope->outer, blocks, block);
/* If we did not make a block for the scope just exited, any
blocks made for inner scopes must be carried forward so they
will later become subblocks of something else. */
else if (scope->blocks)
SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks);
}
/* Pop the current scope, and free the structure for reuse. */
current_scope = scope->outer;
if (scope->function_body)
current_function_scope = scope->outer_function;
memset (scope, 0, sizeof (struct c_scope));
scope->outer = scope_freelist;
scope_freelist = scope;
return block;
}
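/* Sketch of the LABEL_DECL diagnostic in pop_scope above:

       void h (void)
       {
         goto out;
       }
       // error: label 'out' used but not defined
*/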
void
push_file_scope (void)
{
tree decl;
if (file_scope)
return;
push_scope ();
file_scope = current_scope;
start_fname_decls ();
for (decl = visible_builtins; decl; decl = DECL_CHAIN (decl))
bind (DECL_NAME (decl), decl, file_scope,
/*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl));
}
void
pop_file_scope (void)
{
/* In case there were missing closebraces, get us back to the global
binding level. */
while (current_scope != file_scope)
pop_scope ();
/* __FUNCTION__ is defined at file scope (""). This call may be
unnecessary, as tests indicate things still work without it. */
finish_fname_decls ();
check_inline_statics ();
/* This is the point to write out a PCH if we're doing that.
In that case we do not want to do anything else. */
if (pch_file)
{
c_common_write_pch ();
return;
}
/* Pop off the file scope and close this translation unit. */
pop_scope ();
file_scope = 0;
maybe_apply_pending_pragma_weaks ();
}
/* Adjust the bindings for the start of a statement expression. */
void
c_bindings_start_stmt_expr (struct c_spot_bindings* switch_bindings)
{
struct c_scope *scope;
for (scope = current_scope; scope != NULL; scope = scope->outer)
{
struct c_binding *b;
if (!scope->has_label_bindings)
continue;
for (b = scope->bindings; b != NULL; b = b->prev)
{
struct c_label_vars *label_vars;
unsigned int ix;
struct c_goto_bindings *g;
if (TREE_CODE (b->decl) != LABEL_DECL)
continue;
label_vars = b->u.label;
++label_vars->label_bindings.stmt_exprs;
FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
++g->goto_bindings.stmt_exprs;
}
}
if (switch_bindings != NULL)
++switch_bindings->stmt_exprs;
}
/* Adjust the bindings for the end of a statement expression. */
void
c_bindings_end_stmt_expr (struct c_spot_bindings *switch_bindings)
{
struct c_scope *scope;
for (scope = current_scope; scope != NULL; scope = scope->outer)
{
struct c_binding *b;
if (!scope->has_label_bindings)
continue;
for (b = scope->bindings; b != NULL; b = b->prev)
{
struct c_label_vars *label_vars;
unsigned int ix;
struct c_goto_bindings *g;
if (TREE_CODE (b->decl) != LABEL_DECL)
continue;
label_vars = b->u.label;
--label_vars->label_bindings.stmt_exprs;
if (label_vars->label_bindings.stmt_exprs < 0)
{
label_vars->label_bindings.left_stmt_expr = true;
label_vars->label_bindings.stmt_exprs = 0;
}
FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
{
--g->goto_bindings.stmt_exprs;
if (g->goto_bindings.stmt_exprs < 0)
{
g->goto_bindings.left_stmt_expr = true;
g->goto_bindings.stmt_exprs = 0;
}
}
}
}
if (switch_bindings != NULL)
{
--switch_bindings->stmt_exprs;
gcc_assert (switch_bindings->stmt_exprs >= 0);
}
}
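/* The stmt_exprs counters above let later goto checking notice jumps
   that cross a GNU statement-expression boundary. A sketch of the code
   they are meant to catch (diagnostic wording may vary by version):

       int bad (void)
       {
         goto in;
         return ({ in: 1; });
       }
       // error: jump into statement expression
*/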
/* Push a definition or a declaration of struct, union or enum tag "name".
"type" should be the type node.
We assume that the tag "name" is not already defined in this scope;
LOC is the location of the tag.
Note that the definition may really be just a forward reference.
In that case, the TYPE_SIZE will be zero. */
static void
pushtag (location_t loc, tree name, tree type)
{
/* Record the identifier as the type's name if it has none. */
if (name && !TYPE_NAME (type))
TYPE_NAME (type) = name;
bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false, loc);
/* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the
tagged type we just added to the current scope. This fake
NULL-named TYPE_DECL node helps dwarfout.c to know when it needs
to output a representation of a tagged type, and it also gives
us a convenient place to record the "scope start" address for the
tagged type. */
TYPE_STUB_DECL (type) = pushdecl (build_decl (loc,
TYPE_DECL, NULL_TREE, type));
/* An approximation for now, so we can tell this is a function-scope tag.
This will be updated in pop_scope. */
TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type));
if (warn_cxx_compat && name != NULL_TREE)
{
struct c_binding *b = I_SYMBOL_BINDING (name);
if (b != NULL
&& b->decl != NULL_TREE
&& TREE_CODE (b->decl) == TYPE_DECL
&& (B_IN_CURRENT_SCOPE (b)
|| (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
&& (TYPE_MAIN_VARIANT (TREE_TYPE (b->decl))
!= TYPE_MAIN_VARIANT (type)))
{
warning_at (loc, OPT_Wc___compat,
("using %qD as both a typedef and a tag is "
"invalid in C++"),
b->decl);
if (b->locus != UNKNOWN_LOCATION)
inform (b->locus, "originally defined here");
}
}
}
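/* Sketch of the -Wc++-compat case diagnosed above:

       typedef int T;
       struct T { int i; };
       // warning: using 'T' as both a typedef and a tag is invalid in C++
       // note: originally defined here (at the typedef)
*/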
/* An exported interface to pushtag. This is used by the gdb plugin's
binding oracle to introduce a new tag binding. */
void
c_pushtag (location_t loc, tree name, tree type)
{
pushtag (loc, name, type);
}
/* An exported interface to bind a declaration. LOC is the location
to use. DECL is the declaration to bind. The decl's name is used
to determine how it is bound. If DECL is a VAR_DECL, then
IS_GLOBAL determines whether the decl is put into the global (file
and external) scope or the current function's scope; if DECL is not
a VAR_DECL then it is always put into the file scope. */
void
c_bind (location_t loc, tree decl, bool is_global)
{
struct c_scope *scope;
bool nested = false;
if (TREE_CODE (decl) != VAR_DECL || current_function_scope == NULL)
{
/* Types and functions are always considered to be global. */
scope = file_scope;
DECL_EXTERNAL (decl) = 1;
TREE_PUBLIC (decl) = 1;
}
else if (is_global)
{
/* Also bind it into the external scope. */
bind (DECL_NAME (decl), decl, external_scope, true, false, loc);
nested = true;
scope = file_scope;
DECL_EXTERNAL (decl) = 1;
TREE_PUBLIC (decl) = 1;
}
else
{
DECL_CONTEXT (decl) = current_function_decl;
TREE_PUBLIC (decl) = 0;
scope = current_function_scope;
}
bind (DECL_NAME (decl), decl, scope, false, nested, loc);
}
/* Subroutine of diagnose_mismatched_decls. Allow harmless mismatches in
return and argument types provided that the type modes match. This
function returns a unified type given a suitable match, and 0 otherwise. */
static tree
match_builtin_function_types (tree newtype, tree oldtype)
{
tree newrettype, oldrettype;
tree newargs, oldargs;
tree trytype, tryargs;
/* Accept the return type of the new declaration if same modes. */
oldrettype = TREE_TYPE (oldtype);
newrettype = TREE_TYPE (newtype);
if (TYPE_MODE (oldrettype) != TYPE_MODE (newrettype))
return 0;
oldargs = TYPE_ARG_TYPES (oldtype);
newargs = TYPE_ARG_TYPES (newtype);
tryargs = newargs;
while (oldargs || newargs)
{
if (!oldargs
|| !newargs
|| !TREE_VALUE (oldargs)
|| !TREE_VALUE (newargs)
|| TYPE_MODE (TREE_VALUE (oldargs))
!= TYPE_MODE (TREE_VALUE (newargs)))
return 0;
oldargs = TREE_CHAIN (oldargs);
newargs = TREE_CHAIN (newargs);
}
trytype = build_function_type (newrettype, tryargs);
return build_type_attribute_variant (trytype, TYPE_ATTRIBUTES (oldtype));
}
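/* Sketch of a "harmless" mismatch accepted above, assuming a 32-bit
   target where int and long share a machine mode (the ffs case the
   comment mentions):

       extern int ffs (long);
       // builtin is int ffs (int); the argument modes match, so the
       // two types are unified instead of being rejected
*/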
/* Subroutine of diagnose_mismatched_decls. Check for function type
mismatch involving an empty arglist vs a nonempty one and give clearer
diagnostics. */
static void
diagnose_arglist_conflict (tree newdecl, tree olddecl,
tree newtype, tree oldtype)
{
tree t;
if (TREE_CODE (olddecl) != FUNCTION_DECL
|| !comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype))
|| !((!prototype_p (oldtype) && DECL_INITIAL (olddecl) == 0)
|| (!prototype_p (newtype) && DECL_INITIAL (newdecl) == 0)))
return;
t = TYPE_ARG_TYPES (oldtype);
if (t == 0)
t = TYPE_ARG_TYPES (newtype);
for (; t; t = TREE_CHAIN (t))
{
tree type = TREE_VALUE (t);
if (TREE_CHAIN (t) == 0
&& TYPE_MAIN_VARIANT (type) != void_type_node)
{
inform (input_location, "a parameter list with an ellipsis can%'t match "
"an empty parameter name list declaration");
break;
}
if (c_type_promotes_to (type) != type)
{
inform (input_location, "an argument type that has a default promotion can%'t match "
"an empty parameter name list declaration");
break;
}
}
}
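/* Sketches of the two clarifying notes above, each following the main
   "conflicting types" error:

       void f ();
       void f (float x);
       // note: an argument type that has a default promotion can't
       // match an empty parameter name list declaration

       void g ();
       void g (int n, ...);
       // note: a parameter list with an ellipsis can't match an empty
       // parameter name list declaration
*/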
/* Another subroutine of diagnose_mismatched_decls. OLDDECL is an
old-style function definition, NEWDECL is a prototype declaration.
Diagnose inconsistencies in the argument list. Returns TRUE if
the prototype is compatible, FALSE if not. */
static bool
validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype)
{
tree newargs, oldargs;
int i;
#define END_OF_ARGLIST(t) ((t) == void_type_node)
oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype);
newargs = TYPE_ARG_TYPES (newtype);
i = 1;
for (;;)
{
tree oldargtype = TREE_VALUE (oldargs);
tree newargtype = TREE_VALUE (newargs);
if (oldargtype == error_mark_node || newargtype == error_mark_node)
return false;
oldargtype = (TYPE_ATOMIC (oldargtype)
? c_build_qualified_type (TYPE_MAIN_VARIANT (oldargtype),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (oldargtype));
newargtype = (TYPE_ATOMIC (newargtype)
? c_build_qualified_type (TYPE_MAIN_VARIANT (newargtype),
TYPE_QUAL_ATOMIC)
: TYPE_MAIN_VARIANT (newargtype));
if (END_OF_ARGLIST (oldargtype) && END_OF_ARGLIST (newargtype))
break;
/* Reaching the end of just one list means the two decls don't
agree on the number of arguments. */
if (END_OF_ARGLIST (oldargtype))
{
error ("prototype for %q+D declares more arguments "
"than previous old-style definition", newdecl);
return false;
}
else if (END_OF_ARGLIST (newargtype))
{
error ("prototype for %q+D declares fewer arguments "
"than previous old-style definition", newdecl);
return false;
}
/* Type for passing arg must be consistent with that declared
for the arg. */
else if (!comptypes (oldargtype, newargtype))
{
error ("prototype for %q+D declares argument %d"
" with incompatible type",
newdecl, i);
return false;
}
oldargs = TREE_CHAIN (oldargs);
newargs = TREE_CHAIN (newargs);
i++;
}
/* If we get here, no errors were found, but do issue a warning
for this poor-style construct. */
warning (0, "prototype for %q+D follows non-prototype definition",
newdecl);
return true;
#undef END_OF_ARGLIST
}
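/* Sketch of the checks above, starting from an old-style definition:

       int f (x) int x; { return x; }

       int f (int x);
       // accepted, with: warning: prototype for 'f' follows
       // non-prototype definition

       // whereas a mismatched prototype such as
       //     int f (int x, int y);
       // would instead draw: error: prototype for 'f' declares more
       // arguments than previous old-style definition
*/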
/* Subroutine of diagnose_mismatched_decls. Report the location of DECL,
the first in a pair of mismatched declarations. */
static void
locate_old_decl (tree decl)
{
if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl)
&& !C_DECL_DECLARED_BUILTIN (decl))
;
else if (DECL_INITIAL (decl))
inform (input_location, "previous definition of %q+D was here", decl);
else if (C_DECL_IMPLICIT (decl))
inform (input_location, "previous implicit declaration of %q+D was here", decl);
else
inform (input_location, "previous declaration of %q+D was here", decl);
}
/* Subroutine of duplicate_decls. Compare NEWDECL to OLDDECL.
Returns true if the caller should proceed to merge the two, false
if OLDDECL should simply be discarded. As a side effect, issues
all necessary diagnostics for invalid or poor-style combinations.
If it returns true, writes the types of NEWDECL and OLDDECL to
*NEWTYPEP and *OLDTYPEP - these may have been adjusted from
TREE_TYPE (NEWDECL, OLDDECL) respectively. */
static bool
diagnose_mismatched_decls (tree newdecl, tree olddecl,
tree *newtypep, tree *oldtypep)
{
tree newtype, oldtype;
bool pedwarned = false;
bool warned = false;
bool retval = true;
#define DECL_EXTERN_INLINE(DECL) (DECL_DECLARED_INLINE_P (DECL) \
&& DECL_EXTERNAL (DECL))
/* If we have error_mark_node for either decl or type, just discard
the previous decl - we're in an error cascade already. */
if (olddecl == error_mark_node || newdecl == error_mark_node)
return false;
*oldtypep = oldtype = TREE_TYPE (olddecl);
*newtypep = newtype = TREE_TYPE (newdecl);
if (oldtype == error_mark_node || newtype == error_mark_node)
return false;
/* Two different categories of symbol altogether. This is an error
unless OLDDECL is a builtin. OLDDECL will be discarded in any case. */
if (TREE_CODE (olddecl) != TREE_CODE (newdecl))
{
if (!(TREE_CODE (olddecl) == FUNCTION_DECL
&& DECL_BUILT_IN (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl)))
{
error ("%q+D redeclared as different kind of symbol", newdecl);
locate_old_decl (olddecl);
}
else if (TREE_PUBLIC (newdecl))
warning (0, "built-in function %q+D declared as non-function",
newdecl);
else
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", newdecl);
return false;
}
/* Enumerators have no linkage, so may only be declared once in a
given scope. */
if (TREE_CODE (olddecl) == CONST_DECL)
{
error ("redeclaration of enumerator %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
if (!comptypes (oldtype, newtype))
{
if (TREE_CODE (olddecl) == FUNCTION_DECL
&& DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))
{
/* Accept harmless mismatch in function types.
This is for the ffs and fprintf builtins. */
tree trytype = match_builtin_function_types (newtype, oldtype);
if (trytype && comptypes (newtype, trytype))
*oldtypep = oldtype = trytype;
else
{
/* If types don't match for a built-in, throw away the
built-in. No point in calling locate_old_decl here, it
won't print anything. */
warning (0, "conflicting types for built-in function %q+D",
newdecl);
return false;
}
}
else if (TREE_CODE (olddecl) == FUNCTION_DECL
&& DECL_IS_BUILTIN (olddecl))
{
/* A conflicting function declaration for a predeclared
function that isn't actually built in. Objective C uses
these. The new declaration silently overrides everything
but the volatility (i.e. noreturn) indication. See also
below. FIXME: Make Objective C use normal builtins. */
TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
return false;
}
/* Permit void foo (...) to match int foo (...) if the latter is
the definition and implicit int was used. See
c-torture/compile/920625-2.c. */
else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl)
&& TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node
&& TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node
&& C_FUNCTION_IMPLICIT_INT (newdecl) && !DECL_INITIAL (olddecl))
{
pedwarned = pedwarn (input_location, 0,
"conflicting types for %q+D", newdecl);
/* Make sure we keep void as the return type. */
TREE_TYPE (newdecl) = *newtypep = newtype = oldtype;
C_FUNCTION_IMPLICIT_INT (newdecl) = 0;
}
/* Permit void foo (...) to match an earlier call to foo (...) with
no declared type (thus, implicitly int). */
else if (TREE_CODE (newdecl) == FUNCTION_DECL
&& TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == void_type_node
&& TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == integer_type_node
&& C_DECL_IMPLICIT (olddecl) && !DECL_INITIAL (olddecl))
{
pedwarned = pedwarn (input_location, 0,
"conflicting types for %q+D", newdecl);
/* Make sure we keep void as the return type. */
TREE_TYPE (olddecl) = *oldtypep = oldtype = newtype;
}
else
{
int new_quals = TYPE_QUALS (newtype);
int old_quals = TYPE_QUALS (oldtype);
if (new_quals != old_quals)
{
addr_space_t new_addr = DECODE_QUAL_ADDR_SPACE (new_quals);
addr_space_t old_addr = DECODE_QUAL_ADDR_SPACE (old_quals);
if (new_addr != old_addr)
{
if (ADDR_SPACE_GENERIC_P (new_addr))
error ("conflicting named address spaces (generic vs %s) "
"for %q+D",
c_addr_space_name (old_addr), newdecl);
else if (ADDR_SPACE_GENERIC_P (old_addr))
error ("conflicting named address spaces (%s vs generic) "
"for %q+D",
c_addr_space_name (new_addr), newdecl);
else
error ("conflicting named address spaces (%s vs %s) "
"for %q+D",
c_addr_space_name (new_addr),
c_addr_space_name (old_addr),
newdecl);
}
if (CLEAR_QUAL_ADDR_SPACE (new_quals)
!= CLEAR_QUAL_ADDR_SPACE (old_quals))
error ("conflicting type qualifiers for %q+D", newdecl);
}
else
error ("conflicting types for %q+D", newdecl);
diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype);
locate_old_decl (olddecl);
return false;
}
}
/* Redeclaration of a type is a constraint violation (6.7.2.3p1),
but silently ignore the redeclaration if either is in a system
header. (Conflicting redeclarations were handled above.) This
is allowed for C11 if the types are the same, not just
compatible. */
if (TREE_CODE (newdecl) == TYPE_DECL)
{
bool types_different = false;
int comptypes_result;
comptypes_result
= comptypes_check_different_types (oldtype, newtype, &types_different);
if (comptypes_result != 1 || types_different)
{
error ("redefinition of typedef %q+D with different type", newdecl);
locate_old_decl (olddecl);
return false;
}
if (DECL_IN_SYSTEM_HEADER (newdecl)
|| DECL_IN_SYSTEM_HEADER (olddecl)
|| TREE_NO_WARNING (newdecl)
|| TREE_NO_WARNING (olddecl))
return true; /* Allow OLDDECL to continue in use. */
if (variably_modified_type_p (newtype, NULL))
{
error ("redefinition of typedef %q+D with variably modified type",
newdecl);
locate_old_decl (olddecl);
}
else if (pedwarn_c99 (input_location, OPT_Wpedantic,
"redefinition of typedef %q+D", newdecl))
locate_old_decl (olddecl);
return true;
}
/* Function declarations can either be 'static' or 'extern' (no
qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore
can never conflict with each other on account of linkage
(6.2.2p4). Multiple definitions are not allowed (6.9p3,5) but
gnu89 mode permits two definitions if one is 'extern inline' and
one is not. The non-extern-inline definition supersedes the
extern-inline definition. */
else if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
/* If you declare a built-in function name as static, or
define the built-in with an old-style definition (so we
can't validate the argument list), the built-in definition is
overridden, but we optionally warn that this was a bad choice of name. */
if (DECL_BUILT_IN (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl)
&& (!TREE_PUBLIC (newdecl)
|| (DECL_INITIAL (newdecl)
&& !prototype_p (TREE_TYPE (newdecl)))))
{
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", newdecl);
/* Discard the old built-in function. */
return false;
}
if (DECL_INITIAL (newdecl))
{
if (DECL_INITIAL (olddecl))
{
/* If both decls are in the same TU and the new declaration
isn't overriding an extern inline, reject the new decl.
In C99, no overriding is allowed in the same translation
unit. */
if ((!DECL_EXTERN_INLINE (olddecl)
|| DECL_EXTERN_INLINE (newdecl)
|| (!flag_gnu89_inline
&& (!DECL_DECLARED_INLINE_P (olddecl)
|| !lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (olddecl)))
&& (!DECL_DECLARED_INLINE_P (newdecl)
|| !lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (newdecl))))
)
&& same_translation_unit_p (newdecl, olddecl))
{
error ("redefinition of %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
}
}
/* If we have a prototype after an old-style function definition,
the argument types must be checked specially. */
else if (DECL_INITIAL (olddecl)
&& !prototype_p (oldtype) && prototype_p (newtype)
&& TYPE_ACTUAL_ARG_TYPES (oldtype)
&& !validate_proto_after_old_defn (newdecl, newtype, oldtype))
{
locate_old_decl (olddecl);
return false;
}
/* A non-static declaration (even an "extern") followed by a
static declaration is undefined behavior per C99 6.2.2p3-5,7.
The same is true for a static forward declaration at block
scope followed by a non-static declaration/definition at file
scope. Static followed by non-static at the same scope is
not undefined behavior, and is the most convenient way to get
some effects (see e.g. what unwind-dw2-fde-glibc.c does to
the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but
we do diagnose it if -Wtraditional. */
if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl))
{
/* Two exceptions to the rule. If olddecl is an extern
inline, or a predeclared function that isn't actually
built in, newdecl silently overrides olddecl. The latter
occurs only in Objective C; see also above. (FIXME: Make
Objective C use normal builtins.) */
if (!DECL_IS_BUILTIN (olddecl)
&& !DECL_EXTERN_INLINE (olddecl))
{
error ("static declaration of %q+D follows "
"non-static declaration", newdecl);
locate_old_decl (olddecl);
}
return false;
}
else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl))
{
if (DECL_CONTEXT (olddecl))
{
error ("non-static declaration of %q+D follows "
"static declaration", newdecl);
locate_old_decl (olddecl);
return false;
}
else if (warn_traditional)
{
warned |= warning (OPT_Wtraditional,
"non-static declaration of %q+D "
"follows static declaration", newdecl);
}
}
/* Make sure gnu_inline attribute is either not present, or
present on all inline decls. */
if (DECL_DECLARED_INLINE_P (olddecl)
&& DECL_DECLARED_INLINE_P (newdecl))
{
bool newa = lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (newdecl)) != NULL;
bool olda = lookup_attribute ("gnu_inline",
DECL_ATTRIBUTES (olddecl)) != NULL;
if (newa != olda)
{
error_at (input_location, "%<gnu_inline%> attribute present on %q+D",
newa ? newdecl : olddecl);
error_at (DECL_SOURCE_LOCATION (newa ? olddecl : newdecl),
"but not here");
}
}
}
else if (TREE_CODE (newdecl) == VAR_DECL)
{
/* Only variables can be thread-local, and all declarations must
agree on this property. */
if (C_DECL_THREADPRIVATE_P (olddecl) && !DECL_THREAD_LOCAL_P (newdecl))
{
/* Nothing to check. Since OLDDECL is marked threadprivate
and NEWDECL does not have a thread-local attribute, we
will merge the threadprivate attribute into NEWDECL. */
;
}
else if (DECL_THREAD_LOCAL_P (newdecl) != DECL_THREAD_LOCAL_P (olddecl))
{
if (DECL_THREAD_LOCAL_P (newdecl))
error ("thread-local declaration of %q+D follows "
"non-thread-local declaration", newdecl);
else
error ("non-thread-local declaration of %q+D follows "
"thread-local declaration", newdecl);
locate_old_decl (olddecl);
return false;
}
/* Multiple initialized definitions are not allowed (6.9p3,5). */
if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl))
{
error ("redefinition of %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
/* Objects declared at file scope: if the first declaration had
external linkage (even if it was an external reference) the
second must have external linkage as well, or the behavior is
undefined. If the first declaration had internal linkage, then
the second must too, or else be an external reference (in which
case the composite declaration still has internal linkage).
As for function declarations, we warn about the static-then-
extern case only for -Wtraditional. See generally 6.2.2p3-5,7. */
if (DECL_FILE_SCOPE_P (newdecl)
&& TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl))
{
if (DECL_EXTERNAL (newdecl))
{
if (!DECL_FILE_SCOPE_P (olddecl))
{
error ("extern declaration of %q+D follows "
"declaration with no linkage", newdecl);
locate_old_decl (olddecl);
return false;
}
else if (warn_traditional)
{
warned |= warning (OPT_Wtraditional,
"non-static declaration of %q+D "
"follows static declaration", newdecl);
}
}
else
{
if (TREE_PUBLIC (newdecl))
error ("non-static declaration of %q+D follows "
"static declaration", newdecl);
else
error ("static declaration of %q+D follows "
"non-static declaration", newdecl);
locate_old_decl (olddecl);
return false;
}
}
/* Two objects with the same name declared at the same block
scope must both be external references (6.7p3). */
else if (!DECL_FILE_SCOPE_P (newdecl))
{
if (DECL_EXTERNAL (newdecl))
{
/* Extern with initializer at block scope, which will
already have received an error. */
}
else if (DECL_EXTERNAL (olddecl))
{
error ("declaration of %q+D with no linkage follows "
"extern declaration", newdecl);
locate_old_decl (olddecl);
}
else
{
error ("redeclaration of %q+D with no linkage", newdecl);
locate_old_decl (olddecl);
}
return false;
}
/* C++ does not permit a decl to appear multiple times at file
scope. */
if (warn_cxx_compat
&& DECL_FILE_SCOPE_P (newdecl)
&& !DECL_EXTERNAL (newdecl)
&& !DECL_EXTERNAL (olddecl))
warned |= warning_at (DECL_SOURCE_LOCATION (newdecl),
OPT_Wc___compat,
("duplicate declaration of %qD is "
"invalid in C++"),
newdecl);
}
/* warnings */
/* All decls must agree on a visibility. */
if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS)
&& DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY_SPECIFIED (olddecl)
&& DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl))
{
warned |= warning (0, "redeclaration of %q+D with different visibility "
"(old visibility preserved)", newdecl);
}
if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
/* Diagnose inline __attribute__ ((noinline)) which is silly. */
if (DECL_DECLARED_INLINE_P (newdecl)
&& lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl)))
warned |= warning (OPT_Wattributes,
"inline declaration of %qD follows "
"declaration with attribute noinline", newdecl);
else if (DECL_DECLARED_INLINE_P (olddecl)
&& lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl)))
warned |= warning (OPT_Wattributes,
"declaration of %q+D with attribute "
"noinline follows inline declaration", newdecl);
else if (lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl))
&& lookup_attribute ("always_inline", DECL_ATTRIBUTES (olddecl)))
warned |= warning (OPT_Wattributes,
"declaration of %q+D with attribute "
"%qs follows declaration with attribute %qs",
newdecl, "noinline", "always_inline");
else if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (newdecl))
&& lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl)))
warned |= warning (OPT_Wattributes,
"declaration of %q+D with attribute "
"%qs follows declaration with attribute %qs",
newdecl, "always_inline", "noinline");
else if (lookup_attribute ("cold", DECL_ATTRIBUTES (newdecl))
&& lookup_attribute ("hot", DECL_ATTRIBUTES (olddecl)))
warned |= warning (OPT_Wattributes,
"declaration of %q+D with attribute %qs follows "
"declaration with attribute %qs", newdecl, "cold",
"hot");
else if (lookup_attribute ("hot", DECL_ATTRIBUTES (newdecl))
&& lookup_attribute ("cold", DECL_ATTRIBUTES (olddecl)))
warned |= warning (OPT_Wattributes,
"declaration of %q+D with attribute %qs follows "
"declaration with attribute %qs", newdecl, "hot",
"cold");
}
else /* PARM_DECL, VAR_DECL */
{
/* Redeclaration of a parameter is a constraint violation (this is
not explicitly stated, but follows from C99 6.7p3 [no more than
one declaration of the same identifier with no linkage in the
same scope, except type tags] and 6.2.2p6 [parameters have no
linkage]). We must check for a forward parameter declaration,
indicated by TREE_ASM_WRITTEN on the old declaration - this is
an extension, the mandatory diagnostic for which is handled by
mark_forward_parm_decls. */
if (TREE_CODE (newdecl) == PARM_DECL
&& (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl)))
{
error ("redefinition of parameter %q+D", newdecl);
locate_old_decl (olddecl);
return false;
}
}
/* Optional warning for completely redundant decls. */
if (!warned && !pedwarned
&& warn_redundant_decls
/* Don't warn about a function declaration followed by a
definition. */
&& !(TREE_CODE (newdecl) == FUNCTION_DECL
&& DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))
/* Don't warn about redundant redeclarations of builtins. */
&& !(TREE_CODE (newdecl) == FUNCTION_DECL
&& !DECL_BUILT_IN (newdecl)
&& DECL_BUILT_IN (olddecl)
&& !C_DECL_DECLARED_BUILTIN (olddecl))
/* Don't warn about an extern followed by a definition. */
&& !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl))
/* Don't warn about forward parameter decls. */
&& !(TREE_CODE (newdecl) == PARM_DECL
&& TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
/* Don't warn about a variable definition following a declaration. */
&& !(TREE_CODE (newdecl) == VAR_DECL
&& DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)))
{
warned = warning (OPT_Wredundant_decls, "redundant redeclaration of %q+D",
newdecl);
}
/* Report location of previous decl/defn. */
if (warned || pedwarned)
locate_old_decl (olddecl);
#undef DECL_EXTERN_INLINE
return retval;
}
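/* Minimal sketch of the common rejection path above:

       int x;
       double x;
       // error: conflicting types for 'x'
       // note: previous declaration of 'x' was here
*/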
/* Subroutine of duplicate_decls. NEWDECL has been found to be
consistent with OLDDECL, but carries new information. Merge the
new information into OLDDECL. This function issues no
diagnostics. */
static void
merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype)
{
bool new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL
&& DECL_INITIAL (newdecl) != 0);
bool new_is_prototype = (TREE_CODE (newdecl) == FUNCTION_DECL
&& prototype_p (TREE_TYPE (newdecl)));
bool old_is_prototype = (TREE_CODE (olddecl) == FUNCTION_DECL
&& prototype_p (TREE_TYPE (olddecl)));
/* For real parm decl following a forward decl, rechain the old decl
in its new location and clear TREE_ASM_WRITTEN (it's not a
forward decl anymore). */
if (TREE_CODE (newdecl) == PARM_DECL
&& TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl))
{
struct c_binding *b, **here;
for (here = &current_scope->bindings; *here; here = &(*here)->prev)
if ((*here)->decl == olddecl)
goto found;
gcc_unreachable ();
found:
b = *here;
*here = b->prev;
b->prev = current_scope->bindings;
current_scope->bindings = b;
TREE_ASM_WRITTEN (olddecl) = 0;
}
DECL_ATTRIBUTES (newdecl)
= targetm.merge_decl_attributes (olddecl, newdecl);
/* Merge the data types specified in the two decls. */
TREE_TYPE (newdecl)
= TREE_TYPE (olddecl)
= composite_type (newtype, oldtype);
/* Lay the type out, unless already done. */
if (!comptypes (oldtype, TREE_TYPE (newdecl)))
{
if (TREE_TYPE (newdecl) != error_mark_node)
layout_type (TREE_TYPE (newdecl));
if (TREE_CODE (newdecl) != FUNCTION_DECL
&& TREE_CODE (newdecl) != TYPE_DECL
&& TREE_CODE (newdecl) != CONST_DECL)
layout_decl (newdecl, 0);
}
else
{
/* Since the type is OLDDECL's, make OLDDECL's size go with. */
DECL_SIZE (newdecl) = DECL_SIZE (olddecl);
DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl);
DECL_MODE (newdecl) = DECL_MODE (olddecl);
if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl))
{
DECL_ALIGN (newdecl) = DECL_ALIGN (olddecl);
DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl);
}
}
/* Keep the old rtl since we can safely use it. */
if (HAS_RTL_P (olddecl))
COPY_DECL_RTL (olddecl, newdecl);
/* Merge the type qualifiers. */
if (TREE_READONLY (newdecl))
TREE_READONLY (olddecl) = 1;
if (TREE_THIS_VOLATILE (newdecl))
TREE_THIS_VOLATILE (olddecl) = 1;
/* Merge deprecatedness. */
if (TREE_DEPRECATED (newdecl))
TREE_DEPRECATED (olddecl) = 1;
/* If a decl is in a system header and the other isn't, keep the one on the
system header. Otherwise, keep source location of definition rather than
declaration and of prototype rather than non-prototype unless that
prototype is built-in. */
if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
&& DECL_IN_SYSTEM_HEADER (olddecl)
&& !DECL_IN_SYSTEM_HEADER (newdecl) )
DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);
else if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)
&& DECL_IN_SYSTEM_HEADER (newdecl)
&& !DECL_IN_SYSTEM_HEADER (olddecl))
DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (newdecl);
else if ((DECL_INITIAL (newdecl) == 0 && DECL_INITIAL (olddecl) != 0)
|| (old_is_prototype && !new_is_prototype
&& !C_DECL_BUILTIN_PROTOTYPE (olddecl)))
DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl);
/* Merge the initialization information. */
if (DECL_INITIAL (newdecl) == 0)
DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
/* Merge the threadprivate attribute. */
if (TREE_CODE (olddecl) == VAR_DECL && C_DECL_THREADPRIVATE_P (olddecl))
C_DECL_THREADPRIVATE_P (newdecl) = 1;
if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS))
{
/* Copy the assembler name.
Currently, it can only be defined in the prototype. */
COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl);
/* Use visibility of whichever declaration had it specified */
if (DECL_VISIBILITY_SPECIFIED (olddecl))
{
DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl);
DECL_VISIBILITY_SPECIFIED (newdecl) = 1;
}
if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl);
DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl);
DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl);
DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl)
|= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl);
TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl);
DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl);
DECL_IS_OPERATOR_NEW (newdecl) |= DECL_IS_OPERATOR_NEW (olddecl);
TREE_READONLY (newdecl) |= TREE_READONLY (olddecl);
DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl);
DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl);
}
/* Merge the storage class information. */
merge_weak (newdecl, olddecl);
/* For functions, static overrides non-static. */
if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl);
/* This is needed because we don't automatically
copy the attributes of NEWDECL into OLDDECL. */
TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
/* If this clears `static', clear it in the identifier too. */
if (!TREE_PUBLIC (olddecl))
TREE_PUBLIC (DECL_NAME (olddecl)) = 0;
}
}
/* In C99, an 'extern' declaration before (or after) 'inline' means this
function is not DECL_EXTERNAL, unless the 'gnu_inline' attribute
is present. */
if (TREE_CODE (newdecl) == FUNCTION_DECL
&& !flag_gnu89_inline
&& (DECL_DECLARED_INLINE_P (newdecl)
|| DECL_DECLARED_INLINE_P (olddecl))
&& (!DECL_DECLARED_INLINE_P (newdecl)
|| !DECL_DECLARED_INLINE_P (olddecl)
|| !DECL_EXTERNAL (olddecl))
&& DECL_EXTERNAL (newdecl)
&& !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl))
&& !current_function_decl)
DECL_EXTERNAL (newdecl) = 0;
/* An inline definition following a static declaration is not
DECL_EXTERNAL. */
if (new_is_definition
&& (DECL_DECLARED_INLINE_P (newdecl)
|| DECL_DECLARED_INLINE_P (olddecl))
&& !TREE_PUBLIC (olddecl))
DECL_EXTERNAL (newdecl) = 0;
if (DECL_EXTERNAL (newdecl))
{
TREE_STATIC (newdecl) = TREE_STATIC (olddecl);
DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl);
/* An extern decl does not override previous storage class. */
TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl);
if (!DECL_EXTERNAL (newdecl))
{
DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl);
DECL_COMMON (newdecl) = DECL_COMMON (olddecl);
}
}
else
{
TREE_STATIC (olddecl) = TREE_STATIC (newdecl);
TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl);
}
if (TREE_CODE (newdecl) == FUNCTION_DECL)
{
/* If we're redefining a function previously defined as extern
inline, make sure we emit debug info for the inline before we
throw it away, in case it was inlined into a function that
hasn't been written out yet. */
if (new_is_definition && DECL_INITIAL (olddecl))
/* The new defn must not be inline. */
DECL_UNINLINABLE (newdecl) = 1;
else
{
/* If either decl says `inline', this fn is inline, unless
its definition was passed already. */
if (DECL_DECLARED_INLINE_P (newdecl)
|| DECL_DECLARED_INLINE_P (olddecl))
DECL_DECLARED_INLINE_P (newdecl) = 1;
DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl)
= (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl));
DECL_DISREGARD_INLINE_LIMITS (newdecl)
= DECL_DISREGARD_INLINE_LIMITS (olddecl)
= (DECL_DISREGARD_INLINE_LIMITS (newdecl)
|| DECL_DISREGARD_INLINE_LIMITS (olddecl));
}
if (DECL_BUILT_IN (olddecl))
{
/* If redeclaring a builtin function, it stays built in.
But it gets tagged as having been declared. */
DECL_BUILT_IN_CLASS (newdecl) = DECL_BUILT_IN_CLASS (olddecl);
DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl);
C_DECL_DECLARED_BUILTIN (newdecl) = 1;
if (new_is_prototype)
{
C_DECL_BUILTIN_PROTOTYPE (newdecl) = 0;
if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL)
{
enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl);
switch (fncode)
{
/* If a compatible prototype of these builtin functions
is seen, assume the runtime implements it with the
expected semantics. */
case BUILT_IN_STPCPY:
if (builtin_decl_explicit_p (fncode))
set_builtin_decl_implicit_p (fncode, true);
break;
default:
if (builtin_decl_explicit_p (fncode))
set_builtin_decl_declared_p (fncode, true);
break;
}
}
}
else
C_DECL_BUILTIN_PROTOTYPE (newdecl)
= C_DECL_BUILTIN_PROTOTYPE (olddecl);
}
/* Preserve function specific target and optimization options */
if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl)
&& !DECL_FUNCTION_SPECIFIC_TARGET (newdecl))
DECL_FUNCTION_SPECIFIC_TARGET (newdecl)
= DECL_FUNCTION_SPECIFIC_TARGET (olddecl);
if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl)
&& !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl))
DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl);
/* Also preserve various other info from the definition. */
if (!new_is_definition)
{
tree t;
DECL_RESULT (newdecl) = DECL_RESULT (olddecl);
DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl);
DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl);
DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl);
DECL_ARGUMENTS (newdecl) = copy_list (DECL_ARGUMENTS (olddecl));
for (t = DECL_ARGUMENTS (newdecl); t ; t = DECL_CHAIN (t))
DECL_CONTEXT (t) = newdecl;
/* See if we've got a function to instantiate from. */
if (DECL_SAVED_TREE (olddecl))
DECL_ABSTRACT_ORIGIN (newdecl)
= DECL_ABSTRACT_ORIGIN (olddecl);
}
}
/* Merge the USED information. */
if (TREE_USED (olddecl))
TREE_USED (newdecl) = 1;
else if (TREE_USED (newdecl))
TREE_USED (olddecl) = 1;
if (TREE_CODE (olddecl) == VAR_DECL || TREE_CODE (olddecl) == PARM_DECL)
DECL_READ_P (newdecl) |= DECL_READ_P (olddecl);
if (DECL_PRESERVE_P (olddecl))
DECL_PRESERVE_P (newdecl) = 1;
else if (DECL_PRESERVE_P (newdecl))
DECL_PRESERVE_P (olddecl) = 1;
/* Copy most of the decl-specific fields of NEWDECL into OLDDECL.
But preserve OLDDECL's DECL_UID, DECL_CONTEXT and
DECL_ARGUMENTS (if appropriate). */
{
unsigned olddecl_uid = DECL_UID (olddecl);
tree olddecl_context = DECL_CONTEXT (olddecl);
tree olddecl_arguments = NULL;
if (TREE_CODE (olddecl) == FUNCTION_DECL)
olddecl_arguments = DECL_ARGUMENTS (olddecl);
memcpy ((char *) olddecl + sizeof (struct tree_common),
(char *) newdecl + sizeof (struct tree_common),
sizeof (struct tree_decl_common) - sizeof (struct tree_common));
DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl);
switch (TREE_CODE (olddecl))
{
case FUNCTION_DECL:
case VAR_DECL:
{
struct symtab_node *snode = olddecl->decl_with_vis.symtab_node;
memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
(char *) newdecl + sizeof (struct tree_decl_common),
tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common));
olddecl->decl_with_vis.symtab_node = snode;
if ((DECL_EXTERNAL (olddecl)
|| TREE_PUBLIC (olddecl)
|| TREE_STATIC (olddecl))
&& DECL_SECTION_NAME (newdecl) != NULL)
set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl));
/* This isn't quite correct for something like
int __thread x __attribute__ ((tls_model ("local-exec")));
extern int __thread x;
as we'll lose the "local-exec" model. */
if (TREE_CODE (olddecl) == VAR_DECL
&& DECL_THREAD_LOCAL_P (newdecl))
set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl));
break;
}
case FIELD_DECL:
case PARM_DECL:
case LABEL_DECL:
case RESULT_DECL:
case CONST_DECL:
case TYPE_DECL:
memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
(char *) newdecl + sizeof (struct tree_decl_common),
tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common));
break;
default:
memcpy ((char *) olddecl + sizeof (struct tree_decl_common),
(char *) newdecl + sizeof (struct tree_decl_common),
sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common));
}
DECL_UID (olddecl) = olddecl_uid;
DECL_CONTEXT (olddecl) = olddecl_context;
if (TREE_CODE (olddecl) == FUNCTION_DECL)
DECL_ARGUMENTS (olddecl) = olddecl_arguments;
}
/* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl
so that encode_section_info has a chance to look at the new decl
flags and attributes. */
if (DECL_RTL_SET_P (olddecl)
&& (TREE_CODE (olddecl) == FUNCTION_DECL
|| (TREE_CODE (olddecl) == VAR_DECL
&& TREE_STATIC (olddecl))))
make_decl_rtl (olddecl);
}
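/* Sketch of the C99 inline interaction handled above (no
   -fgnu89-inline, no gnu_inline attribute): the extern declaration
   turns the inline definition into an external definition, so
   merge_decls clears DECL_EXTERNAL on the merged decl:

       inline int sq (int i) { return i * i; }
       extern int sq (int i);
       // 'sq' is now an external definition in this translation unit
*/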
/* Handle when a new declaration NEWDECL has the same name as an old
one OLDDECL in the same binding contour. Prints an error message
if appropriate.
If safely possible, alter OLDDECL to look like NEWDECL, and return
true. Otherwise, return false. */
static bool
duplicate_decls (tree newdecl, tree olddecl)
{
tree newtype = NULL, oldtype = NULL;
if (!diagnose_mismatched_decls (newdecl, olddecl, &newtype, &oldtype))
{
/* Avoid `unused variable' and other warnings for OLDDECL. */
TREE_NO_WARNING (olddecl) = 1;
return false;
}
merge_decls (newdecl, olddecl, newtype, oldtype);
/* The NEWDECL will no longer be needed.
Before releasing the node, be sure to remove the function from the
symbol table, where it might have been inserted to record its comdat
group. Take care, however, not to free DECL_STRUCT_FUNCTION, because
that structure is shared between NEWDECL and OLDDECL. */
if (TREE_CODE (newdecl) == FUNCTION_DECL)
DECL_STRUCT_FUNCTION (newdecl) = NULL;
if (TREE_CODE (newdecl) == FUNCTION_DECL
|| TREE_CODE (newdecl) == VAR_DECL)
{
struct symtab_node *snode = symtab_node::get (newdecl);
if (snode)
snode->remove ();
}
ggc_free (newdecl);
return true;
}
/* Check whether decl-node NEW_DECL shadows an existing declaration. */
static void
warn_if_shadowing (tree new_decl)
{
struct c_binding *b;
/* Shadow warnings wanted? */
if (!warn_shadow
/* No shadow warnings for internally generated vars. */
|| DECL_IS_BUILTIN (new_decl)
/* No shadow warnings for vars made for inlining. */
|| DECL_FROM_INLINE (new_decl))
return;
/* Is anything being shadowed? Invisible decls do not count. */
for (b = I_SYMBOL_BINDING (DECL_NAME (new_decl)); b; b = b->shadowed)
if (b->decl && b->decl != new_decl && !b->invisible
&& (b->decl == error_mark_node
|| diagnostic_report_warnings_p (global_dc,
DECL_SOURCE_LOCATION (b->decl))))
{
tree old_decl = b->decl;
bool warned = false;
if (old_decl == error_mark_node)
{
warning (OPT_Wshadow, "declaration of %q+D shadows previous "
"non-variable", new_decl);
break;
}
else if (TREE_CODE (old_decl) == PARM_DECL)
warned = warning (OPT_Wshadow,
"declaration of %q+D shadows a parameter",
new_decl);
else if (DECL_FILE_SCOPE_P (old_decl))
{
/* Do not warn if a variable shadows a function, unless
the variable is a function or a pointer-to-function. */
if (TREE_CODE (old_decl) == FUNCTION_DECL
&& TREE_CODE (new_decl) != FUNCTION_DECL
&& !FUNCTION_POINTER_TYPE_P (TREE_TYPE (new_decl)))
continue;
warned = warning_at (DECL_SOURCE_LOCATION (new_decl), OPT_Wshadow,
"declaration of %qD shadows a global "
"declaration",
new_decl);
}
else if (TREE_CODE (old_decl) == FUNCTION_DECL
&& DECL_BUILT_IN (old_decl))
{
warning (OPT_Wshadow, "declaration of %q+D shadows "
"a built-in function", new_decl);
break;
}
else
warned = warning (OPT_Wshadow, "declaration of %q+D shadows a "
"previous local", new_decl);
if (warned)
inform (DECL_SOURCE_LOCATION (old_decl),
"shadowed declaration is here");
break;
}
}
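/* Sketch of the -Wshadow diagnostics issued above:

       int v;
       void k (int v)   // warning: declaration of 'v' shadows a
       {                //          global declaration
         {
           int v = 0;   // warning: declaration of 'v' shadows a
           (void) v;    //          parameter
         }
       }
*/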
/* Record a decl-node X as belonging to the current lexical scope.
Check for errors (such as an incompatible declaration for the same
name already seen in the same scope).
Returns either X or an old decl for the same name.
If an old decl is returned, it may have been smashed
to agree with what X says. */
tree
pushdecl (tree x)
{
tree name = DECL_NAME (x);
struct c_scope *scope = current_scope;
struct c_binding *b;
bool nested = false;
location_t locus = DECL_SOURCE_LOCATION (x);
/* Must set DECL_CONTEXT for everything not at file scope or
DECL_FILE_SCOPE_P won't work. Local externs don't count
unless they have initializers (which generate code). */
if (current_function_decl
&& ((TREE_CODE (x) != FUNCTION_DECL && TREE_CODE (x) != VAR_DECL)
|| DECL_INITIAL (x) || !DECL_EXTERNAL (x)))
DECL_CONTEXT (x) = current_function_decl;
/* Anonymous decls are just inserted in the scope. */
if (!name)
{
bind (name, x, scope, /*invisible=*/false, /*nested=*/false,
locus);
return x;
}
/* First, see if there is another declaration with the same name in
the current scope. If there is, duplicate_decls may do all the
work for us. If duplicate_decls returns false, that indicates
two incompatible decls in the same scope; we are to silently
replace the old one (duplicate_decls has issued all appropriate
diagnostics). In particular, we should not consider possible
duplicates in the external scope, or shadowing. */
b = I_SYMBOL_BINDING (name);
if (b && B_IN_SCOPE (b, scope))
{
struct c_binding *b_ext, *b_use;
tree type = TREE_TYPE (x);
tree visdecl = b->decl;
tree vistype = TREE_TYPE (visdecl);
if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
&& COMPLETE_TYPE_P (TREE_TYPE (x)))
b->inner_comp = false;
b_use = b;
b_ext = b;
/* If this is an external linkage declaration, we should check
for compatibility with the type in the external scope before
setting the type at this scope based on the visible
information only. */
if (TREE_PUBLIC (x) && TREE_PUBLIC (visdecl))
{
while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
b_ext = b_ext->shadowed;
if (b_ext)
{
b_use = b_ext;
if (b_use->u.type)
TREE_TYPE (b_use->decl) = b_use->u.type;
}
}
if (duplicate_decls (x, b_use->decl))
{
if (b_use != b)
{
/* Save the updated type in the external scope and
restore the proper type for this scope. */
tree thistype;
if (comptypes (vistype, type))
thistype = composite_type (vistype, type);
else
thistype = TREE_TYPE (b_use->decl);
b_use->u.type = TREE_TYPE (b_use->decl);
if (TREE_CODE (b_use->decl) == FUNCTION_DECL
&& DECL_BUILT_IN (b_use->decl))
thistype
= build_type_attribute_variant (thistype,
TYPE_ATTRIBUTES
(b_use->u.type));
TREE_TYPE (b_use->decl) = thistype;
}
return b_use->decl;
}
else
goto skip_external_and_shadow_checks;
}
/* All declarations with external linkage, and all external
references, go in the external scope, no matter what scope is
current. However, the binding in that scope is ignored for
purposes of normal name lookup. A separate binding structure is
created in the requested scope; this governs the normal
visibility of the symbol.
The binding in the externals scope is used exclusively for
detecting duplicate declarations of the same object, no matter
what scope they are in; this is what we do here. (C99 6.2.7p2:
All declarations that refer to the same object or function shall
have compatible type; otherwise, the behavior is undefined.) */
if (DECL_EXTERNAL (x) || scope == file_scope)
{
tree type = TREE_TYPE (x);
tree vistype = 0;
tree visdecl = 0;
bool type_saved = false;
if (b && !B_IN_EXTERNAL_SCOPE (b)
&& (TREE_CODE (b->decl) == FUNCTION_DECL
|| TREE_CODE (b->decl) == VAR_DECL)
&& DECL_FILE_SCOPE_P (b->decl))
{
visdecl = b->decl;
vistype = TREE_TYPE (visdecl);
}
if (scope != file_scope
&& !DECL_IN_SYSTEM_HEADER (x))
warning (OPT_Wnested_externs, "nested extern declaration of %qD", x);
while (b && !B_IN_EXTERNAL_SCOPE (b))
{
/* If this decl might be modified, save its type. This is
done here rather than when the decl is first bound
because the type may change after first binding, through
being completed or through attributes being added. If we
encounter multiple such decls, only the first should have
its type saved; the others will already have had their
proper types saved and the types will not have changed as
their scopes will not have been re-entered. */
if (DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl) && !type_saved)
{
b->u.type = TREE_TYPE (b->decl);
type_saved = true;
}
if (B_IN_FILE_SCOPE (b)
&& TREE_CODE (b->decl) == VAR_DECL
&& TREE_STATIC (b->decl)
&& TREE_CODE (TREE_TYPE (b->decl)) == ARRAY_TYPE
&& !TYPE_DOMAIN (TREE_TYPE (b->decl))
&& TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type)
&& TYPE_MAX_VALUE (TYPE_DOMAIN (type))
&& !integer_zerop (TYPE_MAX_VALUE (TYPE_DOMAIN (type))))
{
/* Array type completed in inner scope, which should be
diagnosed if the completion does not have size 1 and
it does not get completed in the file scope. */
b->inner_comp = true;
}
b = b->shadowed;
}
/* If a matching external declaration has been found, set its
type to the composite of all the types of that declaration.
After the consistency checks, it will be reset to the
composite of the visible types only. */
if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
&& b->u.type)
TREE_TYPE (b->decl) = b->u.type;
/* The point of the same_translation_unit_p check here is,
we want to detect a duplicate decl for a construct like
foo() { extern bar(); } ... static bar(); but not if
they are in different translation units. In any case,
the static does not go in the externals scope. */
if (b
&& (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl))
&& duplicate_decls (x, b->decl))
{
tree thistype;
if (vistype)
{
if (comptypes (vistype, type))
thistype = composite_type (vistype, type);
else
thistype = TREE_TYPE (b->decl);
}
else
thistype = type;
b->u.type = TREE_TYPE (b->decl);
if (TREE_CODE (b->decl) == FUNCTION_DECL && DECL_BUILT_IN (b->decl))
thistype
= build_type_attribute_variant (thistype,
TYPE_ATTRIBUTES (b->u.type));
TREE_TYPE (b->decl) = thistype;
bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true,
locus);
return b->decl;
}
else if (TREE_PUBLIC (x))
{
if (visdecl && !b && duplicate_decls (x, visdecl))
{
/* An external declaration at block scope referring to a
visible entity with internal linkage. The composite
type will already be correct for this scope, so we
just need to fall through to make the declaration in
this scope. */
nested = true;
x = visdecl;
}
else
{
bind (name, x, external_scope, /*invisible=*/true,
/*nested=*/false, locus);
nested = true;
}
}
}
if (TREE_CODE (x) != PARM_DECL)
warn_if_shadowing (x);
skip_external_and_shadow_checks:
if (TREE_CODE (x) == TYPE_DECL)
{
/* So this is a typedef, set its underlying type. */
set_underlying_type (x);
/* If X is a typedef defined in the current function, record it
for the purpose of implementing the -Wunused-local-typedefs
warning. */
record_locally_defined_typedef (x);
}
bind (name, x, scope, /*invisible=*/false, nested, locus);
/* If x's type is incomplete because it's based on a
structure or union which has not yet been fully declared,
attach it to that structure or union type, so we can go
back and complete the variable declaration later, if the
structure or union gets fully declared.
If the input is erroneous, we can have error_mark in the type
slot (e.g. "f(void a, ...)") - that doesn't count as an
incomplete type. */
if (TREE_TYPE (x) != error_mark_node
&& !COMPLETE_TYPE_P (TREE_TYPE (x)))
{
tree element = TREE_TYPE (x);
while (TREE_CODE (element) == ARRAY_TYPE)
element = TREE_TYPE (element);
element = TYPE_MAIN_VARIANT (element);
if ((TREE_CODE (element) == RECORD_TYPE
|| TREE_CODE (element) == UNION_TYPE)
&& (TREE_CODE (x) != TYPE_DECL
|| TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE)
&& !COMPLETE_TYPE_P (element))
C_TYPE_INCOMPLETE_VARS (element)
= tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element));
}
return x;
}
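/* A minimal sketch of the external-scope handling above (hypothetical
   names):

       extern int a[];        file scope: incomplete array type
       void f (void)
       {
         extern int a[5];     block scope: compatible, completes it
       }

   Both declarations refer to one object, so they must have compatible
   types (C99 6.2.7p2); the invisible binding in the external scope is
   what lets duplicate_decls check that here, while the visible binding
   made in the block governs ordinary lookup.  */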
/* Record X as belonging to file scope.
This is used only internally by the Objective-C front end,
and is limited to its needs. duplicate_decls is not called;
if there is any preexisting decl for this identifier, it is an ICE. */
tree
pushdecl_top_level (tree x)
{
tree name;
bool nested = false;
gcc_assert (TREE_CODE (x) == VAR_DECL || TREE_CODE (x) == CONST_DECL);
name = DECL_NAME (x);
gcc_assert (TREE_CODE (x) == CONST_DECL || !I_SYMBOL_BINDING (name));
if (TREE_PUBLIC (x))
{
bind (name, x, external_scope, /*invisible=*/true, /*nested=*/false,
UNKNOWN_LOCATION);
nested = true;
}
if (file_scope)
bind (name, x, file_scope, /*invisible=*/false, nested, UNKNOWN_LOCATION);
return x;
}
static void
implicit_decl_warning (location_t loc, tree id, tree olddecl)
{
if (warn_implicit_function_declaration)
{
bool warned;
if (flag_isoc99)
warned = pedwarn (loc, OPT_Wimplicit_function_declaration,
"implicit declaration of function %qE", id);
else
warned = warning_at (loc, OPT_Wimplicit_function_declaration,
G_("implicit declaration of function %qE"), id);
if (olddecl && warned)
locate_old_decl (olddecl);
}
}
/* Map the built-in function code FCODE to the standard header that
declares the corresponding function, or return NULL if there is none. */
static const char *
header_for_builtin_fn (enum built_in_function fcode)
{
switch (fcode)
{
CASE_FLT_FN (BUILT_IN_ACOS):
CASE_FLT_FN (BUILT_IN_ACOSH):
CASE_FLT_FN (BUILT_IN_ASIN):
CASE_FLT_FN (BUILT_IN_ASINH):
CASE_FLT_FN (BUILT_IN_ATAN):
CASE_FLT_FN (BUILT_IN_ATANH):
CASE_FLT_FN (BUILT_IN_ATAN2):
CASE_FLT_FN (BUILT_IN_CBRT):
CASE_FLT_FN (BUILT_IN_CEIL):
CASE_FLT_FN (BUILT_IN_COPYSIGN):
CASE_FLT_FN (BUILT_IN_COS):
CASE_FLT_FN (BUILT_IN_COSH):
CASE_FLT_FN (BUILT_IN_ERF):
CASE_FLT_FN (BUILT_IN_ERFC):
CASE_FLT_FN (BUILT_IN_EXP):
CASE_FLT_FN (BUILT_IN_EXP2):
CASE_FLT_FN (BUILT_IN_EXPM1):
CASE_FLT_FN (BUILT_IN_FABS):
CASE_FLT_FN (BUILT_IN_FDIM):
CASE_FLT_FN (BUILT_IN_FLOOR):
CASE_FLT_FN (BUILT_IN_FMA):
CASE_FLT_FN (BUILT_IN_FMAX):
CASE_FLT_FN (BUILT_IN_FMIN):
CASE_FLT_FN (BUILT_IN_FMOD):
CASE_FLT_FN (BUILT_IN_FREXP):
CASE_FLT_FN (BUILT_IN_HYPOT):
CASE_FLT_FN (BUILT_IN_ILOGB):
CASE_FLT_FN (BUILT_IN_LDEXP):
CASE_FLT_FN (BUILT_IN_LGAMMA):
CASE_FLT_FN (BUILT_IN_LLRINT):
CASE_FLT_FN (BUILT_IN_LLROUND):
CASE_FLT_FN (BUILT_IN_LOG):
CASE_FLT_FN (BUILT_IN_LOG10):
CASE_FLT_FN (BUILT_IN_LOG1P):
CASE_FLT_FN (BUILT_IN_LOG2):
CASE_FLT_FN (BUILT_IN_LOGB):
CASE_FLT_FN (BUILT_IN_LRINT):
CASE_FLT_FN (BUILT_IN_LROUND):
CASE_FLT_FN (BUILT_IN_MODF):
CASE_FLT_FN (BUILT_IN_NAN):
CASE_FLT_FN (BUILT_IN_NEARBYINT):
CASE_FLT_FN (BUILT_IN_NEXTAFTER):
CASE_FLT_FN (BUILT_IN_NEXTTOWARD):
CASE_FLT_FN (BUILT_IN_POW):
CASE_FLT_FN (BUILT_IN_REMAINDER):
CASE_FLT_FN (BUILT_IN_REMQUO):
CASE_FLT_FN (BUILT_IN_RINT):
CASE_FLT_FN (BUILT_IN_ROUND):
CASE_FLT_FN (BUILT_IN_SCALBLN):
CASE_FLT_FN (BUILT_IN_SCALBN):
CASE_FLT_FN (BUILT_IN_SIN):
CASE_FLT_FN (BUILT_IN_SINH):
CASE_FLT_FN (BUILT_IN_SINCOS):
CASE_FLT_FN (BUILT_IN_SQRT):
CASE_FLT_FN (BUILT_IN_TAN):
CASE_FLT_FN (BUILT_IN_TANH):
CASE_FLT_FN (BUILT_IN_TGAMMA):
CASE_FLT_FN (BUILT_IN_TRUNC):
case BUILT_IN_ISINF:
case BUILT_IN_ISNAN:
return "<math.h>";
CASE_FLT_FN (BUILT_IN_CABS):
CASE_FLT_FN (BUILT_IN_CACOS):
CASE_FLT_FN (BUILT_IN_CACOSH):
CASE_FLT_FN (BUILT_IN_CARG):
CASE_FLT_FN (BUILT_IN_CASIN):
CASE_FLT_FN (BUILT_IN_CASINH):
CASE_FLT_FN (BUILT_IN_CATAN):
CASE_FLT_FN (BUILT_IN_CATANH):
CASE_FLT_FN (BUILT_IN_CCOS):
CASE_FLT_FN (BUILT_IN_CCOSH):
CASE_FLT_FN (BUILT_IN_CEXP):
CASE_FLT_FN (BUILT_IN_CIMAG):
CASE_FLT_FN (BUILT_IN_CLOG):
CASE_FLT_FN (BUILT_IN_CONJ):
CASE_FLT_FN (BUILT_IN_CPOW):
CASE_FLT_FN (BUILT_IN_CPROJ):
CASE_FLT_FN (BUILT_IN_CREAL):
CASE_FLT_FN (BUILT_IN_CSIN):
CASE_FLT_FN (BUILT_IN_CSINH):
CASE_FLT_FN (BUILT_IN_CSQRT):
CASE_FLT_FN (BUILT_IN_CTAN):
CASE_FLT_FN (BUILT_IN_CTANH):
return "<complex.h>";
case BUILT_IN_MEMCHR:
case BUILT_IN_MEMCMP:
case BUILT_IN_MEMCPY:
case BUILT_IN_MEMMOVE:
case BUILT_IN_MEMSET:
case BUILT_IN_STRCAT:
case BUILT_IN_STRCHR:
case BUILT_IN_STRCMP:
case BUILT_IN_STRCPY:
case BUILT_IN_STRCSPN:
case BUILT_IN_STRLEN:
case BUILT_IN_STRNCAT:
case BUILT_IN_STRNCMP:
case BUILT_IN_STRNCPY:
case BUILT_IN_STRPBRK:
case BUILT_IN_STRRCHR:
case BUILT_IN_STRSPN:
case BUILT_IN_STRSTR:
return "<string.h>";
case BUILT_IN_FPRINTF:
case BUILT_IN_PUTC:
case BUILT_IN_FPUTC:
case BUILT_IN_FPUTS:
case BUILT_IN_FSCANF:
case BUILT_IN_FWRITE:
case BUILT_IN_PRINTF:
case BUILT_IN_PUTCHAR:
case BUILT_IN_PUTS:
case BUILT_IN_SCANF:
case BUILT_IN_SNPRINTF:
case BUILT_IN_SPRINTF:
case BUILT_IN_SSCANF:
case BUILT_IN_VFPRINTF:
case BUILT_IN_VFSCANF:
case BUILT_IN_VPRINTF:
case BUILT_IN_VSCANF:
case BUILT_IN_VSNPRINTF:
case BUILT_IN_VSPRINTF:
case BUILT_IN_VSSCANF:
return "<stdio.h>";
case BUILT_IN_ISALNUM:
case BUILT_IN_ISALPHA:
case BUILT_IN_ISBLANK:
case BUILT_IN_ISCNTRL:
case BUILT_IN_ISDIGIT:
case BUILT_IN_ISGRAPH:
case BUILT_IN_ISLOWER:
case BUILT_IN_ISPRINT:
case BUILT_IN_ISPUNCT:
case BUILT_IN_ISSPACE:
case BUILT_IN_ISUPPER:
case BUILT_IN_ISXDIGIT:
case BUILT_IN_TOLOWER:
case BUILT_IN_TOUPPER:
return "<ctype.h>";
case BUILT_IN_ISWALNUM:
case BUILT_IN_ISWALPHA:
case BUILT_IN_ISWBLANK:
case BUILT_IN_ISWCNTRL:
case BUILT_IN_ISWDIGIT:
case BUILT_IN_ISWGRAPH:
case BUILT_IN_ISWLOWER:
case BUILT_IN_ISWPRINT:
case BUILT_IN_ISWPUNCT:
case BUILT_IN_ISWSPACE:
case BUILT_IN_ISWUPPER:
case BUILT_IN_ISWXDIGIT:
case BUILT_IN_TOWLOWER:
case BUILT_IN_TOWUPPER:
return "<wctype.h>";
case BUILT_IN_ABORT:
case BUILT_IN_ABS:
case BUILT_IN_CALLOC:
case BUILT_IN_EXIT:
case BUILT_IN_FREE:
case BUILT_IN_LABS:
case BUILT_IN_LLABS:
case BUILT_IN_MALLOC:
case BUILT_IN_REALLOC:
case BUILT_IN__EXIT2:
case BUILT_IN_ALIGNED_ALLOC:
return "<stdlib.h>";
case BUILT_IN_IMAXABS:
return "<inttypes.h>";
case BUILT_IN_STRFTIME:
return "<time.h>";
default:
return NULL;
}
}
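/* Illustrative use of this mapping (hypothetical code): a call to a
   built-in with no visible declaration, e.g.

       double d = sqrt (2.0);     with <math.h> not included

   leads implicitly_declare, below, to warn "incompatible implicit
   declaration of built-in function 'sqrt'" and, because this function
   returns "<math.h>" for BUILT_IN_SQRT, to add the hint
   "include '<math.h>' or provide a declaration of 'sqrt'".  */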
/* Generate an implicit declaration for identifier FUNCTIONID at LOC as a
function of type int (). */
tree
implicitly_declare (location_t loc, tree functionid)
{
struct c_binding *b;
tree decl = 0;
tree asmspec_tree;
for (b = I_SYMBOL_BINDING (functionid); b; b = b->shadowed)
{
if (B_IN_SCOPE (b, external_scope))
{
decl = b->decl;
break;
}
}
if (decl)
{
if (decl == error_mark_node)
return decl;
/* FIXME: Objective-C has weird not-really-builtin functions
which are supposed to be visible automatically. They wind up
in the external scope because they're pushed before the file
scope gets created. Catch this here and rebind them into the
file scope. */
if (!DECL_BUILT_IN (decl) && DECL_IS_BUILTIN (decl))
{
bind (functionid, decl, file_scope,
/*invisible=*/false, /*nested=*/true,
DECL_SOURCE_LOCATION (decl));
return decl;
}
else
{
tree newtype = default_function_type;
if (b->u.type)
TREE_TYPE (decl) = b->u.type;
/* Implicit declaration of a function already declared
(somehow) in a different scope, or as a built-in.
If this is the first time this has happened, warn;
then recycle the old declaration but with the new type. */
if (!C_DECL_IMPLICIT (decl))
{
implicit_decl_warning (loc, functionid, decl);
C_DECL_IMPLICIT (decl) = 1;
}
if (DECL_BUILT_IN (decl))
{
newtype = build_type_attribute_variant (newtype,
TYPE_ATTRIBUTES
(TREE_TYPE (decl)));
if (!comptypes (newtype, TREE_TYPE (decl)))
{
bool warned = warning_at (loc, 0, "incompatible implicit "
"declaration of built-in "
"function %qD", decl);
/* See if we can hint which header to include. */
const char *header
= header_for_builtin_fn (DECL_FUNCTION_CODE (decl));
if (header != NULL && warned)
inform (loc, "include %qs or provide a declaration of %qD",
header, decl);
newtype = TREE_TYPE (decl);
}
}
else
{
if (!comptypes (newtype, TREE_TYPE (decl)))
{
error_at (loc, "incompatible implicit declaration of "
"function %qD", decl);
locate_old_decl (decl);
}
}
b->u.type = TREE_TYPE (decl);
TREE_TYPE (decl) = newtype;
bind (functionid, decl, current_scope,
/*invisible=*/false, /*nested=*/true,
DECL_SOURCE_LOCATION (decl));
return decl;
}
}
/* Not seen before. */
decl = build_decl (loc, FUNCTION_DECL, functionid, default_function_type);
DECL_EXTERNAL (decl) = 1;
TREE_PUBLIC (decl) = 1;
C_DECL_IMPLICIT (decl) = 1;
implicit_decl_warning (loc, functionid, 0);
asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL);
if (asmspec_tree)
set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree));
/* C89 says implicit declarations are in the innermost block.
So we record the decl in the standard fashion. */
decl = pushdecl (decl);
/* No need to call objc_check_decl here - it's a function type. */
rest_of_decl_compilation (decl, 0, 0);
/* Write a record describing this implicit function declaration
to the prototypes file (if requested). */
gen_aux_info_record (decl, 0, 1, 0);
/* Possibly apply some default attributes to this implicit declaration. */
decl_attributes (&decl, NULL_TREE, 0);
return decl;
}
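/* A small sketch of the usual path through the function above
   (hypothetical names):

       void f (void) { g (); }    no declaration of 'g' anywhere

   In C99 mode this draws the pedwarn "implicit declaration of
   function 'g'"; the call then proceeds as if 'g' had been declared
   with type int () in the innermost block, following the C89 rule
   cited above.  */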
/* Issue an error message for a reference to an undeclared variable
ID, including a reference to a builtin outside of function-call
context. Establish a binding of the identifier to error_mark_node
in an appropriate scope, which will suppress further errors for the
same identifier. The error message should be given location LOC. */
void
undeclared_variable (location_t loc, tree id)
{
static bool already = false;
struct c_scope *scope;
if (current_function_decl == 0)
{
error_at (loc, "%qE undeclared here (not in a function)", id);
scope = current_scope;
}
else
{
if (!objc_diagnose_private_ivar (id))
error_at (loc, "%qE undeclared (first use in this function)", id);
if (!already)
{
inform (loc, "each undeclared identifier is reported only"
" once for each function it appears in");
already = true;
}
/* If we are parsing old-style parameter decls, current_function_decl
will be nonnull but current_function_scope will be null. */
scope = current_function_scope ? current_function_scope : current_scope;
}
bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false,
UNKNOWN_LOCATION);
}
/* Subroutine of lookup_label, declare_label, define_label: construct a
LABEL_DECL with all the proper frills. Also create a struct
c_label_vars initialized for the current scope. */
static tree
make_label (location_t location, tree name, bool defining,
struct c_label_vars **p_label_vars)
{
tree label = build_decl (location, LABEL_DECL, name, void_type_node);
DECL_CONTEXT (label) = current_function_decl;
DECL_MODE (label) = VOIDmode;
c_label_vars *label_vars = ggc_alloc<c_label_vars> ();
label_vars->shadowed = NULL;
set_spot_bindings (&label_vars->label_bindings, defining);
label_vars->decls_in_scope = make_tree_vector ();
label_vars->gotos = NULL;
*p_label_vars = label_vars;
return label;
}
/* Get the LABEL_DECL corresponding to identifier NAME as a label.
Create one if none exists so far for the current function.
This is called when a label is used in a goto expression or
has its address taken. */
tree
lookup_label (tree name)
{
tree label;
struct c_label_vars *label_vars;
if (current_function_scope == 0)
{
error ("label %qE referenced outside of any function", name);
return 0;
}
/* Use a label already defined or ref'd with this name, but not if
it is inherited from a containing function and wasn't declared
using __label__. */
label = I_LABEL_DECL (name);
if (label && (DECL_CONTEXT (label) == current_function_decl
|| C_DECLARED_LABEL_FLAG (label)))
{
/* If the label has only been declared, update its apparent
location to point here, for better diagnostics if it
turns out not to have been defined. */
if (DECL_INITIAL (label) == NULL_TREE)
DECL_SOURCE_LOCATION (label) = input_location;
return label;
}
/* No label binding for that identifier; make one. */
label = make_label (input_location, name, false, &label_vars);
/* Ordinary labels go in the current function scope. */
bind_label (name, label, current_function_scope, label_vars);
return label;
}
/* Issue a warning about DECL for a goto statement at GOTO_LOC going
to LABEL. */
static void
warn_about_goto (location_t goto_loc, tree label, tree decl)
{
if (variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
error_at (goto_loc,
"jump into scope of identifier with variably modified type");
else
warning_at (goto_loc, OPT_Wjump_misses_init,
"jump skips variable initialization");
inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl);
}
/* Look up a label because of a goto statement. This is like
lookup_label, but also issues any appropriate warnings. */
tree
lookup_label_for_goto (location_t loc, tree name)
{
tree label;
struct c_label_vars *label_vars;
unsigned int ix;
tree decl;
label = lookup_label (name);
if (label == NULL_TREE)
return NULL_TREE;
/* If we are jumping to a different function, we can't issue any
useful warnings. */
if (DECL_CONTEXT (label) != current_function_decl)
{
gcc_assert (C_DECLARED_LABEL_FLAG (label));
return label;
}
label_vars = I_LABEL_BINDING (name)->u.label;
/* If the label has not yet been defined, then push this goto on a
list for possible later warnings. */
if (label_vars->label_bindings.scope == NULL)
{
c_goto_bindings *g = ggc_alloc<c_goto_bindings> ();
g->loc = loc;
set_spot_bindings (&g->goto_bindings, true);
vec_safe_push (label_vars->gotos, g);
return label;
}
/* If there are any decls in label_vars->decls_in_scope, then this
goto has missed the declaration of the decl. This happens for a
case like
int i = 1;
lab:
...
goto lab;
Issue a warning or error. */
FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl)
warn_about_goto (loc, label, decl);
if (label_vars->label_bindings.left_stmt_expr)
{
error_at (loc, "jump into statement expression");
inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label);
}
return label;
}
/* Make a label named NAME in the current function, shadowing silently
any that may be inherited from containing functions or containing
scopes. This is called for __label__ declarations. */
tree
declare_label (tree name)
{
struct c_binding *b = I_LABEL_BINDING (name);
tree label;
struct c_label_vars *label_vars;
/* Check to make sure that the label hasn't already been declared
at this scope. */
if (b && B_IN_CURRENT_SCOPE (b))
{
error ("duplicate label declaration %qE", name);
locate_old_decl (b->decl);
/* Just use the previous declaration. */
return b->decl;
}
label = make_label (input_location, name, false, &label_vars);
C_DECLARED_LABEL_FLAG (label) = 1;
/* Declared labels go in the current scope. */
bind_label (name, label, current_scope, label_vars);
return label;
}
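/* Sketch of the __label__ handling above (hypothetical labels):

       void f (void)
       {
         out: ;
         {
           __label__ out;    declares a label local to this block
           out: ;            shadows the outer 'out' silently
           goto out;         jumps to the inner label
         }
       }

   Without the __label__ declaration the inner 'out:' would be a
   duplicate-label error, since ordinary labels have function scope. */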
/* When we define a label, issue any appropriate warnings if there are
any gotos earlier in the function which jump to this label. */
static void
check_earlier_gotos (tree label, struct c_label_vars* label_vars)
{
unsigned int ix;
struct c_goto_bindings *g;
FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g)
{
struct c_binding *b;
struct c_scope *scope;
/* We have a goto to this label, and the goto is going forward. In
g->goto_bindings.scope, the goto skips any binding which was
defined after g->goto_bindings.bindings_in_scope. */
if (g->goto_bindings.scope->has_jump_unsafe_decl)
{
for (b = g->goto_bindings.scope->bindings;
b != g->goto_bindings.bindings_in_scope;
b = b->prev)
{
if (decl_jump_unsafe (b->decl))
warn_about_goto (g->loc, label, b->decl);
}
}
/* We also need to warn about decls defined in any scopes
between the scope of the label and the scope of the goto. */
for (scope = label_vars->label_bindings.scope;
scope != g->goto_bindings.scope;
scope = scope->outer)
{
gcc_assert (scope != NULL);
if (scope->has_jump_unsafe_decl)
{
if (scope == label_vars->label_bindings.scope)
b = label_vars->label_bindings.bindings_in_scope;
else
b = scope->bindings;
for (; b != NULL; b = b->prev)
{
if (decl_jump_unsafe (b->decl))
warn_about_goto (g->loc, label, b->decl);
}
}
}
if (g->goto_bindings.stmt_exprs > 0)
{
error_at (g->loc, "jump into statement expression");
inform (DECL_SOURCE_LOCATION (label), "label %qD defined here",
label);
}
}
/* Now that the label is defined, we will issue warnings about
subsequent gotos to this label when we see them. */
vec_safe_truncate (label_vars->gotos, 0);
label_vars->gotos = NULL;
}
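/* Example of the forward-goto case handled above (assuming
   -Wjump-misses-init; names are hypothetical):

       void f (void)
       {
         goto lab;        recorded in label_vars->gotos for later
         {
           int i = 1;
           lab: ;         defining the label triggers the warning
         }                "jump skips variable initialization"
       }
   */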
/* Define a label, specifying the location in the source file.
Return the LABEL_DECL node for the label, if the definition is valid.
Otherwise return 0. */
tree
define_label (location_t location, tree name)
{
/* Find any preexisting label with this name. It is an error
if that label has already been defined in this function, or
if there is a containing function with a declared label with
the same name. */
tree label = I_LABEL_DECL (name);
if (label
&& ((DECL_CONTEXT (label) == current_function_decl
&& DECL_INITIAL (label) != 0)
|| (DECL_CONTEXT (label) != current_function_decl
&& C_DECLARED_LABEL_FLAG (label))))
{
error_at (location, "duplicate label %qD", label);
locate_old_decl (label);
return 0;
}
else if (label && DECL_CONTEXT (label) == current_function_decl)
{
struct c_label_vars *label_vars = I_LABEL_BINDING (name)->u.label;
/* The label has been used or declared already in this function,
but not defined. Update its location to point to this
definition. */
DECL_SOURCE_LOCATION (label) = location;
set_spot_bindings (&label_vars->label_bindings, true);
/* Issue warnings as required about any goto statements from
earlier in the function. */
check_earlier_gotos (label, label_vars);
}
else
{
struct c_label_vars *label_vars;
/* No label binding for that identifier; make one. */
label = make_label (location, name, true, &label_vars);
/* Ordinary labels go in the current function scope. */
bind_label (name, label, current_function_scope, label_vars);
}
if (!in_system_header_at (input_location) && lookup_name (name))
warning_at (location, OPT_Wtraditional,
"traditional C lacks a separate namespace "
"for labels, identifier %qE conflicts", name);
/* Mark label as having been defined. */
DECL_INITIAL (label) = error_mark_node;
return label;
}
/* Get the bindings for a new switch statement. This is used to issue
warnings as appropriate for jumps from the switch to case or
default labels. */
struct c_spot_bindings *
c_get_switch_bindings (void)
{
struct c_spot_bindings *switch_bindings;
switch_bindings = XNEW (struct c_spot_bindings);
set_spot_bindings (switch_bindings, true);
return switch_bindings;
}
void
c_release_switch_bindings (struct c_spot_bindings *bindings)
{
gcc_assert (bindings->stmt_exprs == 0 && !bindings->left_stmt_expr);
XDELETE (bindings);
}
/* This is called at the point of a case or default label to issue
warnings about decls as needed. It returns true if it found an
error, not just a warning. */
bool
c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings,
location_t switch_loc, location_t case_loc)
{
bool saw_error;
struct c_scope *scope;
saw_error = false;
for (scope = current_scope;
scope != switch_bindings->scope;
scope = scope->outer)
{
struct c_binding *b;
gcc_assert (scope != NULL);
if (!scope->has_jump_unsafe_decl)
continue;
for (b = scope->bindings; b != NULL; b = b->prev)
{
if (decl_jump_unsafe (b->decl))
{
if (variably_modified_type_p (TREE_TYPE (b->decl), NULL_TREE))
{
saw_error = true;
error_at (case_loc,
("switch jumps into scope of identifier with "
"variably modified type"));
}
else
warning_at (case_loc, OPT_Wjump_misses_init,
"switch jumps over variable initialization");
inform (switch_loc, "switch starts here");
inform (DECL_SOURCE_LOCATION (b->decl), "%qD declared here",
b->decl);
}
}
}
if (switch_bindings->stmt_exprs > 0)
{
saw_error = true;
error_at (case_loc, "switch jumps into statement expression");
inform (switch_loc, "switch starts here");
}
return saw_error;
}
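/* Example of the error case above (hypothetical code):

       void f (int n)
       {
         switch (n)
           {
             int a[n];    a VLA declared between switch and case
           case 0:
             a[0] = 0;
             break;
           }
       }

   The case label jumps into the scope of 'a', whose type is variably
   modified, so this reports "switch jumps into scope of identifier
   with variably modified type" and returns true.  */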
/* Given NAME, an IDENTIFIER_NODE,
return the structure (or union or enum) definition for that name.
If THISLEVEL_ONLY is nonzero, searches only the current_scope.
CODE says which kind of type the caller wants;
it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE.
If PLOC is not NULL and this returns non-null, it sets *PLOC to the
location where the tag was defined.
If the wrong kind of type is found, an error is reported. */
static tree
lookup_tag (enum tree_code code, tree name, int thislevel_only,
location_t *ploc)
{
struct c_binding *b = I_TAG_BINDING (name);
int thislevel = 0;
if (!b || !b->decl)
return 0;
/* We only care about whether it's in this level if
thislevel_only was set or it might be a type clash. */
if (thislevel_only || TREE_CODE (b->decl) != code)
{
/* For our purposes, a tag in the external scope is the same as
a tag in the file scope. (Primarily relevant to Objective-C
and its builtin structure tags, which get pushed before the
file scope is created.) */
if (B_IN_CURRENT_SCOPE (b)
|| (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
thislevel = 1;
}
if (thislevel_only && !thislevel)
return 0;
if (TREE_CODE (b->decl) != code)
{
/* Definition isn't the kind we were looking for. */
pending_invalid_xref = name;
pending_invalid_xref_location = input_location;
/* If in the same binding level as a declaration as a tag
of a different type, this must not be allowed to
shadow that tag, so give the error immediately.
(For example, "struct foo; union foo;" is invalid.) */
if (thislevel)
pending_xref_error ();
}
if (ploc != NULL)
*ploc = b->locus;
return b->decl;
}
/* Print an error message now
for a recent invalid struct, union or enum cross reference.
We don't print them immediately because they are not invalid
when used in the `struct foo;' construct for shadowing. */
void
pending_xref_error (void)
{
if (pending_invalid_xref != 0)
error_at (pending_invalid_xref_location, "%qE defined as wrong kind of tag",
pending_invalid_xref);
pending_invalid_xref = 0;
}
/* Look up NAME in the current scope and its superiors
in the namespace of variables, functions and typedefs.
Return a ..._DECL node of some kind representing its definition,
or return 0 if it is undefined. */
tree
lookup_name (tree name)
{
struct c_binding *b = I_SYMBOL_BINDING (name);
if (b && !b->invisible)
{
maybe_record_typedef_use (b->decl);
return b->decl;
}
return 0;
}
/* Similar to `lookup_name' but look only at the indicated scope. */
static tree
lookup_name_in_scope (tree name, struct c_scope *scope)
{
struct c_binding *b;
for (b = I_SYMBOL_BINDING (name); b; b = b->shadowed)
if (B_IN_SCOPE (b, scope))
return b->decl;
return 0;
}
/* Create the predefined scalar types of C,
and some nodes representing standard constants (0, 1, (void *) 0).
Initialize the global scope.
Make definitions for built-in primitive functions. */
void
c_init_decl_processing (void)
{
location_t save_loc = input_location;
/* Initialize reserved words for parser. */
c_parse_init ();
current_function_decl = 0;
gcc_obstack_init (&parser_obstack);
/* Make the externals scope. */
push_scope ();
external_scope = current_scope;
/* Declarations from c_common_nodes_and_builtins must not be associated
with this input file, lest we get differences between using and not
using preprocessed headers. */
input_location = BUILTINS_LOCATION;
c_common_nodes_and_builtins ();
/* In C, comparisons and TRUTH_* expressions have type int. */
truthvalue_type_node = integer_type_node;
truthvalue_true_node = integer_one_node;
truthvalue_false_node = integer_zero_node;
/* Even in C99, which has a real boolean type. */
pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier ("_Bool"),
boolean_type_node));
input_location = save_loc;
make_fname_decl = c_make_fname_decl;
start_fname_decls ();
}
/* Create the VAR_DECL at LOC for __FUNCTION__ etc. ID is the name to
give the decl, NAME is the initialization string and TYPE_DEP
indicates whether NAME depended on the type of the function. As we
don't yet implement delayed emission of static data, we mark the
decl as emitted so it is not placed in the output. Anything using
it must therefore pull out the STRING_CST initializer directly.
FIXME. */
static tree
c_make_fname_decl (location_t loc, tree id, int type_dep)
{
const char *name = fname_as_string (type_dep);
tree decl, type, init;
size_t length = strlen (name);
type = build_array_type (char_type_node,
build_index_type (size_int (length)));
type = c_build_qualified_type (type, TYPE_QUAL_CONST);
decl = build_decl (loc, VAR_DECL, id, type);
TREE_STATIC (decl) = 1;
TREE_READONLY (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
init = build_string (length + 1, name);
free (CONST_CAST (char *, name));
TREE_TYPE (init) = type;
DECL_INITIAL (decl) = init;
TREE_USED (decl) = 1;
if (current_function_decl
/* For invalid programs like this:
void foo()
const char* p = __FUNCTION__;
__FUNCTION__ appears to be inside a K&R-style function
parameter declarator, in which case we do not yet have a
function scope. */
&& (!seen_error () || current_function_scope))
{
DECL_CONTEXT (decl) = current_function_decl;
bind (id, decl, current_function_scope,
/*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
}
finish_decl (decl, loc, init, NULL_TREE, NULL_TREE);
return decl;
}
tree
c_builtin_function (tree decl)
{
tree type = TREE_TYPE (decl);
tree id = DECL_NAME (decl);
const char *name = IDENTIFIER_POINTER (id);
C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);
/* Should never be called on a symbol with a preexisting meaning. */
gcc_assert (!I_SYMBOL_BINDING (id));
bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false,
UNKNOWN_LOCATION);
/* Builtins in the implementation namespace are made visible without
needing to be explicitly declared. See push_file_scope. */
if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
{
DECL_CHAIN (decl) = visible_builtins;
visible_builtins = decl;
}
return decl;
}
tree
c_builtin_function_ext_scope (tree decl)
{
tree type = TREE_TYPE (decl);
tree id = DECL_NAME (decl);
const char *name = IDENTIFIER_POINTER (id);
C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type);
if (external_scope)
bind (id, decl, external_scope, /*invisible=*/false, /*nested=*/false,
UNKNOWN_LOCATION);
/* Builtins in the implementation namespace are made visible without
needing to be explicitly declared. See push_file_scope. */
if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1])))
{
DECL_CHAIN (decl) = visible_builtins;
visible_builtins = decl;
}
return decl;
}
/* Called when a declaration is seen that contains no names to declare.
If its type is a reference to a structure, union or enum inherited
from a containing scope, shadow that tag name for the current scope
with a forward reference.
If its type defines a new named structure or union
or defines an enum, it is valid but we need not do anything here.
Otherwise, it is an error. */
void
shadow_tag (const struct c_declspecs *declspecs)
{
shadow_tag_warned (declspecs, 0);
}
/* WARNED is 1 if we have done a pedwarn, 2 if we have done a warning,
but no pedwarn. */
void
shadow_tag_warned (const struct c_declspecs *declspecs, int warned)
{
bool found_tag = false;
if (declspecs->type && !declspecs->default_int_p && !declspecs->typedef_p)
{
tree value = declspecs->type;
enum tree_code code = TREE_CODE (value);
if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE)
/* We used to test also that TYPE_SIZE (value) != 0, but that
caused a warning for `struct foo;' at top level in a file. */
{
tree name = TYPE_NAME (value);
tree t;
found_tag = true;
if (declspecs->restrict_p)
{
error ("invalid use of %<restrict%>");
warned = 1;
}
if (name == 0)
{
if (warned != 1 && code != ENUMERAL_TYPE)
/* Empty unnamed enum OK */
{
pedwarn (input_location, 0,
"unnamed struct/union that defines no instances");
warned = 1;
}
}
else if (declspecs->typespec_kind != ctsk_tagdef
&& declspecs->typespec_kind != ctsk_tagfirstref
&& declspecs->storage_class != csc_none)
{
if (warned != 1)
pedwarn (input_location, 0,
"empty declaration with storage class specifier "
"does not redeclare tag");
warned = 1;
pending_xref_error ();
}
else if (declspecs->typespec_kind != ctsk_tagdef
&& declspecs->typespec_kind != ctsk_tagfirstref
&& (declspecs->const_p
|| declspecs->volatile_p
|| declspecs->atomic_p
|| declspecs->restrict_p
|| declspecs->address_space))
{
if (warned != 1)
pedwarn (input_location, 0,
"empty declaration with type qualifier "
"does not redeclare tag");
warned = 1;
pending_xref_error ();
}
else if (declspecs->typespec_kind != ctsk_tagdef
&& declspecs->typespec_kind != ctsk_tagfirstref
&& declspecs->alignas_p)
{
if (warned != 1)
pedwarn (input_location, 0,
"empty declaration with %<_Alignas%> "
"does not redeclare tag");
warned = 1;
pending_xref_error ();
}
else
{
pending_invalid_xref = 0;
t = lookup_tag (code, name, 1, NULL);
if (t == 0)
{
t = make_node (code);
pushtag (input_location, name, t);
}
}
}
else
{
if (warned != 1 && !in_system_header_at (input_location))
{
pedwarn (input_location, 0,
"useless type name in empty declaration");
warned = 1;
}
}
}
else if (warned != 1 && !in_system_header_at (input_location)
&& declspecs->typedef_p)
{
pedwarn (input_location, 0, "useless type name in empty declaration");
warned = 1;
}
pending_invalid_xref = 0;
if (declspecs->inline_p)
{
error ("%<inline%> in empty declaration");
warned = 1;
}
if (declspecs->noreturn_p)
{
error ("%<_Noreturn%> in empty declaration");
warned = 1;
}
if (current_scope == file_scope && declspecs->storage_class == csc_auto)
{
error ("%<auto%> in file-scope empty declaration");
warned = 1;
}
if (current_scope == file_scope && declspecs->storage_class == csc_register)
{
error ("%<register%> in file-scope empty declaration");
warned = 1;
}
if (!warned && !in_system_header_at (input_location)
&& declspecs->storage_class != csc_none)
{
warning (0, "useless storage class specifier in empty declaration");
warned = 2;
}
if (!warned && !in_system_header_at (input_location) && declspecs->thread_p)
{
warning (0, "useless %qs in empty declaration",
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
warned = 2;
}
if (!warned
&& !in_system_header_at (input_location)
&& (declspecs->const_p
|| declspecs->volatile_p
|| declspecs->atomic_p
|| declspecs->restrict_p
|| declspecs->address_space))
{
warning (0, "useless type qualifier in empty declaration");
warned = 2;
}
if (!warned && !in_system_header_at (input_location)
&& declspecs->alignas_p)
{
warning (0, "useless %<_Alignas%> in empty declaration");
warned = 2;
}
if (warned != 1)
{
if (!found_tag)
pedwarn (input_location, 0, "empty declaration");
}
}
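/* Some illustrative empty declarations and how the logic above
   classifies them (assuming 'struct s' was declared earlier where
   relevant):

       struct s;               valid: (re)declares the tag
       struct { int i; };      pedwarn: unnamed struct/union that
                               defines no instances
       static struct s;        pedwarn: storage class specifier does
                               not redeclare tag
       const struct s;         pedwarn: type qualifier does not
                               redeclare tag
       int;                    pedwarn: useless type name in empty
                               declaration
   */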
/* Return the qualifiers from SPECS as a bitwise OR of TYPE_QUAL_*
bits. SPECS represents declaration specifiers that the grammar
only permits to contain type qualifiers and attributes. */
int
quals_from_declspecs (const struct c_declspecs *specs)
{
int quals = ((specs->const_p ? TYPE_QUAL_CONST : 0)
| (specs->volatile_p ? TYPE_QUAL_VOLATILE : 0)
| (specs->restrict_p ? TYPE_QUAL_RESTRICT : 0)
| (specs->atomic_p ? TYPE_QUAL_ATOMIC : 0)
| (ENCODE_QUAL_ADDR_SPACE (specs->address_space)));
gcc_assert (!specs->type
&& !specs->decl_attr
&& specs->typespec_word == cts_none
&& specs->storage_class == csc_none
&& !specs->typedef_p
&& !specs->explicit_signed_p
&& !specs->deprecated_p
&& !specs->long_p
&& !specs->long_long_p
&& !specs->short_p
&& !specs->signed_p
&& !specs->unsigned_p
&& !specs->complex_p
&& !specs->inline_p
&& !specs->noreturn_p
&& !specs->thread_p);
return quals;
}
/* Construct an array declarator. LOC is the location of the
beginning of the array (usually the opening bracket). EXPR is the
expression inside [], or NULL_TREE. QUALS are the type qualifiers
inside the [] (to be applied to the pointer to which a parameter
array is converted). STATIC_P is true if "static" is inside the
[], false otherwise. VLA_UNSPEC_P is true if the array is [*], a
VLA of unspecified length which is nevertheless a complete type,
false otherwise. The field for the contained declarator is left to
be filled in by set_array_declarator_inner. */
struct c_declarator *
build_array_declarator (location_t loc,
tree expr, struct c_declspecs *quals, bool static_p,
bool vla_unspec_p)
{
struct c_declarator *declarator = XOBNEW (&parser_obstack,
struct c_declarator);
declarator->id_loc = loc;
declarator->kind = cdk_array;
declarator->declarator = 0;
declarator->u.array.dimen = expr;
if (quals)
{
declarator->u.array.attrs = quals->attrs;
declarator->u.array.quals = quals_from_declspecs (quals);
}
else
{
declarator->u.array.attrs = NULL_TREE;
declarator->u.array.quals = 0;
}
declarator->u.array.static_p = static_p;
declarator->u.array.vla_unspec_p = vla_unspec_p;
if (static_p || quals != NULL)
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support %<static%> or type "
"qualifiers in parameter array declarators");
if (vla_unspec_p)
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support %<[*]%> array declarators");
if (vla_unspec_p)
{
if (!current_scope->parm_flag)
{
/* C99 6.7.5.2p4 */
error_at (loc, "%<[*]%> not allowed in other than "
"function prototype scope");
declarator->u.array.vla_unspec_p = false;
return NULL;
}
current_scope->had_vla_unspec = true;
}
return declarator;
}
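/* The parameter-array declarator forms handled above, for reference:

       void f (int a[static 5]);    static_p: at least 5 elements
       void g (int a[const]);       quals apply to the converted
                                    pointer parameter
       void h (int a[*]);           vla_unspec_p: VLA of unspecified
                                    length, prototype scope only

   Each form is diagnosed with a pedwarn in C90 pedantic mode, as the
   calls above show.  */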
/* Set the contained declarator of an array declarator. DECL is the
declarator, as constructed by build_array_declarator; INNER is what
appears on the left of the []. */
struct c_declarator *
set_array_declarator_inner (struct c_declarator *decl,
struct c_declarator *inner)
{
decl->declarator = inner;
return decl;
}
/* INIT is a constructor that forms DECL's initializer. If the final
element initializes a flexible array field, add the size of that
initializer to DECL's size. */
static void
add_flexible_array_elts_to_size (tree decl, tree init)
{
tree elt, type;
if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init)))
return;
elt = CONSTRUCTOR_ELTS (init)->last ().value;
type = TREE_TYPE (elt);
if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_SIZE (type) == NULL_TREE
&& TYPE_DOMAIN (type) != NULL_TREE
&& TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE)
{
complete_array_type (&type, elt, false);
DECL_SIZE (decl)
= size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (type));
DECL_SIZE_UNIT (decl)
= size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl), TYPE_SIZE_UNIT (type));
}
}
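/* Example of what this adjusts (a GNU extension; hypothetical type):

       struct str { int len; char data[]; };
       static struct str s = { 3, "abc" };

   The initializer completes the flexible array member as char[4],
   and that size is added to DECL_SIZE and DECL_SIZE_UNIT of 's'.  */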
/* Decode a "typename", such as "int **", returning a ..._TYPE node.
Set *EXPR, if EXPR not NULL, to any expression to be evaluated
before the type name, and set *EXPR_CONST_OPERANDS, if
EXPR_CONST_OPERANDS not NULL, to indicate whether the type name may
appear in a constant expression. */
tree
groktypename (struct c_type_name *type_name, tree *expr,
bool *expr_const_operands)
{
tree type;
tree attrs = type_name->specs->attrs;
type_name->specs->attrs = NULL_TREE;
type = grokdeclarator (type_name->declarator, type_name->specs, TYPENAME,
false, NULL, &attrs, expr, expr_const_operands,
DEPRECATED_NORMAL);
/* Apply attributes. */
decl_attributes (&type, attrs, 0);
return type;
}
/* Wrapper for decl_attributes that adds some implicit attributes
to VAR_DECLs or FUNCTION_DECLs. */
static tree
c_decl_attributes (tree *node, tree attributes, int flags)
{
/* Add implicit "omp declare target" attribute if requested. */
if (current_omp_declare_target_attribute
&& ((TREE_CODE (*node) == VAR_DECL
&& (TREE_STATIC (*node) || DECL_EXTERNAL (*node)))
|| TREE_CODE (*node) == FUNCTION_DECL))
{
if (TREE_CODE (*node) == VAR_DECL
&& ((DECL_CONTEXT (*node)
&& TREE_CODE (DECL_CONTEXT (*node)) == FUNCTION_DECL)
|| (current_function_decl && !DECL_EXTERNAL (*node))))
error ("%q+D in block scope inside of declare target directive",
*node);
else if (TREE_CODE (*node) == VAR_DECL
&& !lang_hooks.types.omp_mappable_type (TREE_TYPE (*node)))
error ("%q+D in declare target directive does not have mappable type",
*node);
else
attributes = tree_cons (get_identifier ("omp declare target"),
NULL_TREE, attributes);
}
return decl_attributes (node, attributes, flags);
}
/* Decode a declarator in an ordinary declaration or data definition.
This is called as soon as the type information and variable name
have been parsed, before parsing the initializer if any.
Here we create the ..._DECL node, fill in its type,
and put it on the list of decls for the current context.
The ..._DECL node is returned as the value.
Exception: for arrays where the length is not specified,
the type is left null, to be filled in by `finish_decl'.
Function definitions do not come here; they go to start_function
instead. However, external and forward declarations of functions
do go through here. Structure field declarations are done by
grokfield and not through here. */
tree
start_decl (struct c_declarator *declarator, struct c_declspecs *declspecs,
bool initialized, tree attributes)
{
tree decl;
tree tem;
tree expr = NULL_TREE;
enum deprecated_states deprecated_state = DEPRECATED_NORMAL;
/* An object declared as __attribute__((deprecated)) suppresses
warnings of uses of other deprecated items. */
if (lookup_attribute ("deprecated", attributes))
deprecated_state = DEPRECATED_SUPPRESS;
decl = grokdeclarator (declarator, declspecs,
NORMAL, initialized, NULL, &attributes, &expr, NULL,
deprecated_state);
if (!decl || decl == error_mark_node)
return NULL_TREE;
if (expr)
add_stmt (fold_convert (void_type_node, expr));
if (TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl)))
warning (OPT_Wmain, "%q+D is usually a function", decl);
if (initialized)
/* Is it valid for this decl to have an initializer at all?
If not, set INITIALIZED to zero, which will indirectly
tell 'finish_decl' to ignore the initializer once it is parsed. */
switch (TREE_CODE (decl))
{
case TYPE_DECL:
error ("typedef %qD is initialized (use __typeof__ instead)", decl);
initialized = 0;
break;
case FUNCTION_DECL:
error ("function %qD is initialized like a variable", decl);
initialized = 0;
break;
case PARM_DECL:
/* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE. */
error ("parameter %qD is initialized", decl);
initialized = 0;
break;
default:
/* Don't allow initializations for incomplete types except for
arrays which might be completed by the initialization. */
/* This can happen if the array size is an undefined macro.
We already gave a warning, so we don't need another one. */
if (TREE_TYPE (decl) == error_mark_node)
initialized = 0;
else if (COMPLETE_TYPE_P (TREE_TYPE (decl)))
{
/* A complete type is ok if size is fixed. */
if (TREE_CODE (TYPE_SIZE (TREE_TYPE (decl))) != INTEGER_CST
|| C_DECL_VARIABLE_SIZE (decl))
{
error ("variable-sized object may not be initialized");
initialized = 0;
}
}
else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE)
{
error ("variable %qD has initializer but incomplete type", decl);
initialized = 0;
}
else if (C_DECL_VARIABLE_SIZE (decl))
{
/* Although C99 is unclear about whether incomplete arrays
of VLAs themselves count as VLAs, it does not make
sense to permit them to be initialized given that
ordinary VLAs may not be initialized. */
error ("variable-sized object may not be initialized");
initialized = 0;
}
}
if (initialized)
{
if (current_scope == file_scope)
TREE_STATIC (decl) = 1;
/* Tell 'pushdecl' this is an initialized decl
even though we don't yet have the initializer expression.
Also tell 'finish_decl' it may store the real initializer. */
DECL_INITIAL (decl) = error_mark_node;
}
/* If this is a function declaration, write a record describing it to the
prototypes file (if requested). */
if (TREE_CODE (decl) == FUNCTION_DECL)
gen_aux_info_record (decl, 0, 0, prototype_p (TREE_TYPE (decl)));
/* ANSI specifies that a tentative definition which is not merged with
a non-tentative definition behaves exactly like a definition with an
initializer equal to zero. (Section 3.7.2)
-fno-common gives strict ANSI behavior, though this tends to break
a large body of code that grew up without this rule.
Thread-local variables are never common, since there's no entrenched
body of code to break, and it allows more efficient variable references
in the presence of dynamic linking. */
if (TREE_CODE (decl) == VAR_DECL
&& !initialized
&& TREE_PUBLIC (decl)
&& !DECL_THREAD_LOCAL_P (decl)
&& !flag_no_common)
DECL_COMMON (decl) = 1;
/* Set attributes here so if duplicate decl, will have proper attributes. */
c_decl_attributes (&decl, attributes, 0);
/* Handle gnu_inline attribute. */
if (declspecs->inline_p
&& !flag_gnu89_inline
&& TREE_CODE (decl) == FUNCTION_DECL
&& (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl))
|| current_function_decl))
{
if (declspecs->storage_class == csc_auto && current_scope != file_scope)
;
else if (declspecs->storage_class != csc_static)
DECL_EXTERNAL (decl) = !DECL_EXTERNAL (decl);
}
if (TREE_CODE (decl) == FUNCTION_DECL
&& targetm.calls.promote_prototypes (TREE_TYPE (decl)))
{
struct c_declarator *ce = declarator;
if (ce->kind == cdk_pointer)
ce = declarator->declarator;
if (ce->kind == cdk_function)
{
tree args = ce->u.arg_info->parms;
for (; args; args = DECL_CHAIN (args))
{
tree type = TREE_TYPE (args);
if (type && INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
DECL_ARG_TYPE (args) = c_type_promotes_to (type);
}
}
}
if (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_DECLARED_INLINE_P (decl)
&& DECL_UNINLINABLE (decl)
&& lookup_attribute ("noinline", DECL_ATTRIBUTES (decl)))
warning (OPT_Wattributes, "inline function %q+D given attribute noinline",
decl);
/* C99 6.7.4p3: An inline definition of a function with external
linkage shall not contain a definition of a modifiable object
with static storage duration... */
if (TREE_CODE (decl) == VAR_DECL
&& current_scope != file_scope
&& TREE_STATIC (decl)
&& !TREE_READONLY (decl)
&& DECL_DECLARED_INLINE_P (current_function_decl)
&& DECL_EXTERNAL (current_function_decl))
record_inline_static (input_location, current_function_decl,
decl, csi_modifiable);
if (c_dialect_objc ()
&& (TREE_CODE (decl) == VAR_DECL
|| TREE_CODE (decl) == FUNCTION_DECL))
objc_check_global_decl (decl);
/* Add this decl to the current scope.
TEM may equal DECL or it may be a previous decl of the same name. */
tem = pushdecl (decl);
if (initialized && DECL_EXTERNAL (tem))
{
DECL_EXTERNAL (tem) = 0;
TREE_STATIC (tem) = 1;
}
return tem;
}
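/* Sketch of the tentative-definition rule discussed above
   (hypothetical name):

       int shared;      file scope, no initializer, external linkage

   With the default -fcommon this becomes a common symbol
   (DECL_COMMON), so matching tentative definitions in other
   translation units merge at link time; with -fno-common it behaves
   like a definition initialized to zero, per the strict ISO rule.  */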
/* Subroutine of finish_decl. TYPE is the type of an uninitialized object
DECL or the non-array element type if DECL is an uninitialized array.
If that type has a const member, diagnose this. */
static void
diagnose_uninitialized_cst_member (tree decl, tree type)
{
tree field;
for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
{
tree field_type;
if (TREE_CODE (field) != FIELD_DECL)
continue;
field_type = strip_array_types (TREE_TYPE (field));
if (TYPE_QUALS (field_type) & TYPE_QUAL_CONST)
{
warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
"uninitialized const member in %qT is invalid in C++",
strip_array_types (TREE_TYPE (decl)));
inform (DECL_SOURCE_LOCATION (field), "%qD should be initialized", field);
}
if (TREE_CODE (field_type) == RECORD_TYPE
|| TREE_CODE (field_type) == UNION_TYPE)
diagnose_uninitialized_cst_member (decl, field_type);
}
}
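/* Example of what this diagnoses, under -Wc++-compat (hypothetical
   type):

       struct s { const int i; };
       void f (void) { struct s v; }    'v' has no initializer

   C++ would reject 'v', so this warns "uninitialized const member in
   'struct s' is invalid in C++" and points at the field 'i'.  */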
/* Finish processing of a declaration;
install its initial value.
If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
If the length of an array type is not known before,
it must be determined now, from the initial value, or it is an error.
INIT_LOC is the location of the initial value. */
void
finish_decl (tree decl, location_t init_loc, tree init,
tree origtype, tree asmspec_tree)
{
tree type;
bool was_incomplete = (DECL_SIZE (decl) == 0);
const char *asmspec = 0;
/* If a name was specified, get the string. */
if ((TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == VAR_DECL)
&& DECL_FILE_SCOPE_P (decl))
asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree);
if (asmspec_tree)
asmspec = TREE_STRING_POINTER (asmspec_tree);
if (TREE_CODE (decl) == VAR_DECL
&& TREE_STATIC (decl)
&& global_bindings_p ())
/* So decl is a global variable. Record the types it uses
so that we can decide later to emit debug info for them. */
record_types_used_by_current_var_decl (decl);
/* If `start_decl' didn't like having an initialization, ignore it now. */
if (init != 0 && DECL_INITIAL (decl) == 0)
init = 0;
/* Don't crash if parm is initialized. */
if (TREE_CODE (decl) == PARM_DECL)
init = 0;
if (init)
store_init_value (init_loc, decl, init, origtype);
if (c_dialect_objc () && (TREE_CODE (decl) == VAR_DECL
|| TREE_CODE (decl) == FUNCTION_DECL
|| TREE_CODE (decl) == FIELD_DECL))
objc_check_decl (decl);
type = TREE_TYPE (decl);
/* Deduce size of array from initialization, if not already known. */
if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type) == 0
&& TREE_CODE (decl) != TYPE_DECL)
{
bool do_default
= (TREE_STATIC (decl)
/* Even if pedantic, an external linkage array
may have incomplete type at first. */
? pedantic && !TREE_PUBLIC (decl)
: !DECL_EXTERNAL (decl));
int failure
= complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl),
do_default);
/* Get the completed type made by complete_array_type. */
type = TREE_TYPE (decl);
switch (failure)
{
case 1:
error ("initializer fails to determine size of %q+D", decl);
break;
case 2:
if (do_default)
error ("array size missing in %q+D", decl);
/* If a `static' var's size isn't known,
make it extern as well as static, so it does not get
allocated.
If it is not `static', then do not mark extern;
finish_incomplete_decl will give it a default size
and it will get allocated. */
else if (!pedantic && TREE_STATIC (decl) && !TREE_PUBLIC (decl))
DECL_EXTERNAL (decl) = 1;
break;
case 3:
error ("zero or negative size array %q+D", decl);
break;
case 0:
/* For global variables, update the copy of the type that
exists in the binding. */
if (TREE_PUBLIC (decl))
{
struct c_binding *b_ext = I_SYMBOL_BINDING (DECL_NAME (decl));
while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext))
b_ext = b_ext->shadowed;
if (b_ext)
{
if (b_ext->u.type && comptypes (b_ext->u.type, type))
b_ext->u.type = composite_type (b_ext->u.type, type);
else
b_ext->u.type = type;
}
}
break;
default:
gcc_unreachable ();
}
if (DECL_INITIAL (decl))
TREE_TYPE (DECL_INITIAL (decl)) = type;
relayout_decl (decl);
}
if (TREE_CODE (decl) == VAR_DECL)
{
if (init && TREE_CODE (init) == CONSTRUCTOR)
add_flexible_array_elts_to_size (decl, init);
if (DECL_SIZE (decl) == 0 && TREE_TYPE (decl) != error_mark_node
&& COMPLETE_TYPE_P (TREE_TYPE (decl)))
layout_decl (decl, 0);
if (DECL_SIZE (decl) == 0
/* Don't give an error if we already gave one earlier. */
&& TREE_TYPE (decl) != error_mark_node
&& (TREE_STATIC (decl)
/* A static variable with an incomplete type
is an error if it is initialized.
Also if it is not file scope.
Otherwise, let it through, but if it is not `extern'
then it may cause an error message later. */
? (DECL_INITIAL (decl) != 0
|| !DECL_FILE_SCOPE_P (decl))
/* An automatic variable with an incomplete type
is an error. */
: !DECL_EXTERNAL (decl)))
{
error ("storage size of %q+D isn%'t known", decl);
TREE_TYPE (decl) = error_mark_node;
}
if ((DECL_EXTERNAL (decl) || TREE_STATIC (decl))
&& DECL_SIZE (decl) != 0)
{
if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST)
constant_expression_warning (DECL_SIZE (decl));
else
{
error ("storage size of %q+D isn%'t constant", decl);
TREE_TYPE (decl) = error_mark_node;
}
}
if (TREE_USED (type))
{
TREE_USED (decl) = 1;
DECL_READ_P (decl) = 1;
}
}
/* If this is a function and an assembler name is specified, reset DECL_RTL
so we can give it its new name. Also, update builtin_decl if it
was a normal built-in. */
if (TREE_CODE (decl) == FUNCTION_DECL && asmspec)
{
if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
set_builtin_user_assembler_name (decl, asmspec);
set_user_assembler_name (decl, asmspec);
}
/* If #pragma weak was used, mark the decl weak now. */
maybe_apply_pragma_weak (decl);
/* Output the assembler code and/or RTL code for variables and functions,
unless the type is an undefined structure or union.
If not, it will get done when the type is completed. */
if (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL)
{
/* Determine the ELF visibility. */
if (TREE_PUBLIC (decl))
c_determine_visibility (decl);
/* This is a no-op in c-lang.c or something real in objc-act.c. */
if (c_dialect_objc ())
objc_check_decl (decl);
if (asmspec)
{
/* If this is not a static variable, issue a warning.
It doesn't make any sense to give an ASMSPEC for an
ordinary, non-register local variable. Historically,
GCC has accepted -- but ignored -- the ASMSPEC in
this case. */
if (!DECL_FILE_SCOPE_P (decl)
&& TREE_CODE (decl) == VAR_DECL
&& !C_DECL_REGISTER (decl)
&& !TREE_STATIC (decl))
warning (0, "ignoring asm-specifier for non-static local "
"variable %q+D", decl);
else
set_user_assembler_name (decl, asmspec);
}
if (DECL_FILE_SCOPE_P (decl))
{
if (DECL_INITIAL (decl) == NULL_TREE
|| DECL_INITIAL (decl) == error_mark_node)
/* Don't output anything
when a tentative file-scope definition is seen.
But at end of compilation, do output code for them. */
DECL_DEFER_OUTPUT (decl) = 1;
if (asmspec && C_DECL_REGISTER (decl))
DECL_HARD_REGISTER (decl) = 1;
rest_of_decl_compilation (decl, true, 0);
}
else
{
/* In conjunction with an ASMSPEC, the `register'
keyword indicates that we should place the variable
in a particular register. */
if (asmspec && C_DECL_REGISTER (decl))
{
DECL_HARD_REGISTER (decl) = 1;
/* This cannot be done for a structure with volatile
fields, on which DECL_REGISTER will have been
reset. */
if (!DECL_REGISTER (decl))
error ("cannot put object with volatile field into register");
}
if (TREE_CODE (decl) != FUNCTION_DECL)
{
/* If we're building a variable sized type, and we might be
reachable other than via the top of the current binding
level, then create a new BIND_EXPR so that we deallocate
the object at the right time. */
/* Note that DECL_SIZE can be null due to errors. */
if (DECL_SIZE (decl)
&& !TREE_CONSTANT (DECL_SIZE (decl))
&& STATEMENT_LIST_HAS_LABEL (cur_stmt_list))
{
tree bind;
bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
add_stmt (bind);
BIND_EXPR_BODY (bind) = push_stmt_list ();
}
add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl),
DECL_EXPR, decl));
}
}
if (!DECL_FILE_SCOPE_P (decl))
{
/* Recompute the RTL of a local array now
if it used to be an incomplete type. */
if (was_incomplete
&& !TREE_STATIC (decl) && !DECL_EXTERNAL (decl))
{
/* If we used it already as memory, it must stay in memory. */
TREE_ADDRESSABLE (decl) = TREE_USED (decl);
/* If it's still incomplete now, no init will save it. */
if (DECL_SIZE (decl) == 0)
DECL_INITIAL (decl) = 0;
}
}
}
if (TREE_CODE (decl) == TYPE_DECL)
{
if (!DECL_FILE_SCOPE_P (decl)
&& variably_modified_type_p (TREE_TYPE (decl), NULL_TREE))
add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl));
rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl), 0);
}
/* Install a cleanup (aka destructor) if one was given. */
if (TREE_CODE (decl) == VAR_DECL && !TREE_STATIC (decl))
{
tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl));
if (attr)
{
tree cleanup_id = TREE_VALUE (TREE_VALUE (attr));
tree cleanup_decl = lookup_name (cleanup_id);
tree cleanup;
vec<tree, va_gc> *v;
/* Build "cleanup(&decl)" for the destructor. */
cleanup = build_unary_op (input_location, ADDR_EXPR, decl, 0);
vec_alloc (v, 1);
v->quick_push (cleanup);
cleanup = c_build_function_call_vec (DECL_SOURCE_LOCATION (decl),
vNULL, cleanup_decl, v, NULL);
vec_free (v);
/* Don't warn about decl unused; the cleanup uses it. */
TREE_USED (decl) = 1;
TREE_USED (cleanup_decl) = 1;
DECL_READ_P (decl) = 1;
push_cleanup (decl, cleanup, false);
}
}
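/* A user-level sketch of what the "cleanup" handling above arranges
   (hypothetical example, not part of this file): CLEANUP_FN (&VAR)
   runs when VAR goes out of scope.

       #include <stdio.h>
       static void close_file (FILE **fp) { if (*fp) fclose (*fp); }

       void
       use (void)
       {
         FILE *f __attribute__ ((cleanup (close_file)))
           = fopen ("data.txt", "r");
       }   // close_file (&f) is pushed as a cleanup and runs here.
*/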
if (warn_cxx_compat
&& TREE_CODE (decl) == VAR_DECL
&& !DECL_EXTERNAL (decl)
&& DECL_INITIAL (decl) == NULL_TREE)
{
type = strip_array_types (type);
if (TREE_READONLY (decl))
warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
"uninitialized const %qD is invalid in C++", decl);
else if ((TREE_CODE (type) == RECORD_TYPE
|| TREE_CODE (type) == UNION_TYPE)
&& C_TYPE_FIELDS_READONLY (type))
diagnose_uninitialized_cst_member (decl, type);
}
invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl);
}
/* Given a parsed parameter declaration, decode it into a PARM_DECL.
EXPR is NULL or a pointer to an expression that needs to be
evaluated for the side effects of array size expressions in the
parameters. */
tree
grokparm (const struct c_parm *parm, tree *expr)
{
tree attrs = parm->attrs;
tree decl = grokdeclarator (parm->declarator, parm->specs, PARM, false,
NULL, &attrs, expr, NULL, DEPRECATED_NORMAL);
decl_attributes (&decl, attrs, 0);
return decl;
}
/* Given a parsed parameter declaration, decode it into a PARM_DECL
and push that on the current scope. EXPR is a pointer to an
expression that needs to be evaluated for the side effects of array
size expressions in the parameters. */
void
push_parm_decl (const struct c_parm *parm, tree *expr)
{
tree attrs = parm->attrs;
tree decl;
decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL,
&attrs, expr, NULL, DEPRECATED_NORMAL);
decl_attributes (&decl, attrs, 0);
decl = pushdecl (decl);
finish_decl (decl, input_location, NULL_TREE, NULL_TREE, NULL_TREE);
}
/* Mark all the parameter declarations to date as forward decls.
Also diagnose use of this extension. */
void
mark_forward_parm_decls (void)
{
struct c_binding *b;
if (pedantic && !current_scope->warned_forward_parm_decls)
{
pedwarn (input_location, OPT_Wpedantic,
"ISO C forbids forward parameter declarations");
current_scope->warned_forward_parm_decls = true;
}
for (b = current_scope->bindings; b; b = b->prev)
if (TREE_CODE (b->decl) == PARM_DECL)
TREE_ASM_WRITTEN (b->decl) = 1;
}
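/* A user-level sketch of the extension diagnosed above (hypothetical
   example): GNU C forward parameter declarations let a later
   parameter's type refer to an earlier parameter.

       void
       store (int len; char data[len], int len)
       {
         data[len - 1] = '\0';
       }
*/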
/* Build a COMPOUND_LITERAL_EXPR. TYPE is the type given in the compound
literal, which may be an incomplete array type completed by the
initializer; INIT is a CONSTRUCTOR at LOC that initializes the compound
literal. NON_CONST is true if the initializers contain something
that cannot occur in a constant expression. */
tree
build_compound_literal (location_t loc, tree type, tree init, bool non_const)
{
/* We do not use start_decl here because we have a type, not a declarator;
and do not use finish_decl because the decl should be stored inside
the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR. */
tree decl;
tree complit;
tree stmt;
if (type == error_mark_node
|| init == error_mark_node)
return error_mark_node;
decl = build_decl (loc, VAR_DECL, NULL_TREE, type);
DECL_EXTERNAL (decl) = 0;
TREE_PUBLIC (decl) = 0;
TREE_STATIC (decl) = (current_scope == file_scope);
DECL_CONTEXT (decl) = current_function_decl;
TREE_USED (decl) = 1;
DECL_READ_P (decl) = 1;
TREE_TYPE (decl) = type;
TREE_READONLY (decl) = (TYPE_READONLY (type)
|| (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_READONLY (TREE_TYPE (type))));
store_init_value (loc, decl, init, NULL_TREE);
if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type))
{
int failure = complete_array_type (&TREE_TYPE (decl),
DECL_INITIAL (decl), true);
/* If complete_array_type returns 3, it means that the
initial value of the compound literal is empty. Allow it. */
gcc_assert (failure == 0 || failure == 3);
type = TREE_TYPE (decl);
TREE_TYPE (DECL_INITIAL (decl)) = type;
}
if (type == error_mark_node || !COMPLETE_TYPE_P (type))
{
c_incomplete_type_error (NULL_TREE, type);
return error_mark_node;
}
stmt = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl);
complit = build1 (COMPOUND_LITERAL_EXPR, type, stmt);
TREE_SIDE_EFFECTS (complit) = 1;
layout_decl (decl, 0);
if (TREE_STATIC (decl))
{
/* This decl needs a name for the assembler output. */
set_compound_literal_name (decl);
DECL_DEFER_OUTPUT (decl) = 1;
DECL_COMDAT (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 1;
pushdecl (decl);
rest_of_decl_compilation (decl, 1, 0);
}
if (non_const)
{
complit = build2 (C_MAYBE_CONST_EXPR, type, NULL, complit);
C_MAYBE_CONST_EXPR_NON_CONST (complit) = 1;
}
return complit;
}
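/* A user-level sketch of compound literals as built above (hypothetical
   example): an incomplete array type is completed from the initializer
   by complete_array_type, and a file-scope literal gets static storage.

       int *p = (int []){ 1, 2, 3 };   // type completed to int[3]
       // sizeof ((int []){ 1, 2, 3 }) == 3 * sizeof (int)
*/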
/* Check the type of a compound literal. Here we just check that it
is valid for C++. */
void
check_compound_literal_type (location_t loc, struct c_type_name *type_name)
{
if (warn_cxx_compat
&& (type_name->specs->typespec_kind == ctsk_tagdef
|| type_name->specs->typespec_kind == ctsk_tagfirstref))
warning_at (loc, OPT_Wc___compat,
"defining a type in a compound literal is invalid in C++");
}
/* Determine whether TYPE is a structure with a flexible array member,
or a union containing such a structure (possibly recursively). */
static bool
flexible_array_type_p (tree type)
{
tree x;
switch (TREE_CODE (type))
{
case RECORD_TYPE:
x = TYPE_FIELDS (type);
if (x == NULL_TREE)
return false;
while (DECL_CHAIN (x) != NULL_TREE)
x = DECL_CHAIN (x);
if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
&& TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE
&& TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE
&& TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE)
return true;
return false;
case UNION_TYPE:
for (x = TYPE_FIELDS (type); x != NULL_TREE; x = DECL_CHAIN (x))
{
if (flexible_array_type_p (TREE_TYPE (x)))
return true;
}
return false;
default:
return false;
}
}
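/* A user-level sketch of the shapes this predicate matches (hypothetical
   example): the last member's array type has a domain with no maximum
   value, i.e. a C99 flexible array member.

       struct flex { int n; double data[]; };   // matched directly
       union u { struct flex f; long pad; };    // matched recursively
*/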
/* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME,
replacing with appropriate values if they are invalid. */
static void
check_bitfield_type_and_width (tree *type, tree *width, tree orig_name)
{
tree type_mv;
unsigned int max_width;
unsigned HOST_WIDE_INT w;
const char *name = (orig_name
? identifier_to_locale (IDENTIFIER_POINTER (orig_name))
: _("<anonymous>"));
/* Detect and ignore out of range field width and process valid
field widths. */
if (!INTEGRAL_TYPE_P (TREE_TYPE (*width)))
{
error ("bit-field %qs width not an integer constant", name);
*width = integer_one_node;
}
else
{
if (TREE_CODE (*width) != INTEGER_CST)
{
*width = c_fully_fold (*width, false, NULL);
if (TREE_CODE (*width) == INTEGER_CST)
pedwarn (input_location, OPT_Wpedantic,
"bit-field %qs width not an integer constant expression",
name);
}
if (TREE_CODE (*width) != INTEGER_CST)
{
error ("bit-field %qs width not an integer constant", name);
*width = integer_one_node;
}
constant_expression_warning (*width);
if (tree_int_cst_sgn (*width) < 0)
{
error ("negative width in bit-field %qs", name);
*width = integer_one_node;
}
else if (integer_zerop (*width) && orig_name)
{
error ("zero width for bit-field %qs", name);
*width = integer_one_node;
}
}
/* Detect invalid bit-field type. */
if (TREE_CODE (*type) != INTEGER_TYPE
&& TREE_CODE (*type) != BOOLEAN_TYPE
&& TREE_CODE (*type) != ENUMERAL_TYPE)
{
error ("bit-field %qs has invalid type", name);
*type = unsigned_type_node;
}
type_mv = TYPE_MAIN_VARIANT (*type);
if (!in_system_header_at (input_location)
&& type_mv != integer_type_node
&& type_mv != unsigned_type_node
&& type_mv != boolean_type_node)
pedwarn_c90 (input_location, OPT_Wpedantic,
"type of bit-field %qs is a GCC extension", name);
max_width = TYPE_PRECISION (*type);
if (0 < compare_tree_int (*width, max_width))
{
error ("width of %qs exceeds its type", name);
w = max_width;
*width = build_int_cst (integer_type_node, w);
}
else
w = tree_to_uhwi (*width);
if (TREE_CODE (*type) == ENUMERAL_TYPE)
{
struct lang_type *lt = TYPE_LANG_SPECIFIC (*type);
if (!lt
|| w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type))
|| w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type)))
warning (0, "%qs is narrower than values of its type", name);
}
}
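/* A user-level sketch of the checks above (hypothetical example,
   assuming a target where int has 32 bits):

       struct s
       {
         int a : 33;      // error: width of 'a' exceeds its type
         int b : -1;      // error: negative width in bit-field 'b'
         int c : 0;       // error: zero width for bit-field 'c'
         int : 0;         // OK: only *named* zero-width fields are rejected
         double d : 3;    // error: bit-field 'd' has invalid type
       };
*/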
/* Print warning about variable length array if necessary. */
static void
warn_variable_length_array (tree name, tree size)
{
if (TREE_CONSTANT (size))
{
if (name)
pedwarn_c90 (input_location, OPT_Wvla,
"ISO C90 forbids array %qE whose size "
"can%'t be evaluated", name);
else
pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array "
"whose size can%'t be evaluated");
}
else
{
if (name)
pedwarn_c90 (input_location, OPT_Wvla,
"ISO C90 forbids variable length array %qE", name);
else
pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable "
"length array");
}
}
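/* A user-level sketch of the two branches above (hypothetical example):

       enum { N = 4 };
       void
       f (int n)
       {
         int a[n];                  // variable length array
         int b[(int) (double) N];   // folds to a constant but is not a C90
                                    // integer constant expression, so its
                                    // size "can't be evaluated" by C90 rules
       }
*/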
/* Print warning about defaulting to int if necessary. */
static void
warn_defaults_to (location_t location, int opt, const char *gmsgid, ...)
{
diagnostic_info diagnostic;
va_list ap;
va_start (ap, gmsgid);
diagnostic_set_info (&diagnostic, gmsgid, &ap, location,
flag_isoc99 ? DK_PEDWARN : DK_WARNING);
diagnostic.option_index = opt;
report_diagnostic (&diagnostic);
va_end (ap);
}
/* Given declspecs and a declarator,
determine the name and type of the object declared
and construct a ..._DECL node for it.
(In one case we can return a ..._TYPE node instead.
For invalid input we sometimes return 0.)
DECLSPECS is a c_declspecs structure for the declaration specifiers.
DECL_CONTEXT says which syntactic context this declaration is in:
NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL.
FUNCDEF for a function definition. Like NORMAL, but with a few
different error messages in each case. Return value may be zero meaning
this definition is too screwy to try to parse.
PARM for a parameter declaration (either within a function prototype
or before a function body). Make a PARM_DECL, or return void_type_node.
TYPENAME if for a typename (in a cast or sizeof).
Don't make a DECL node; just return the ..._TYPE node.
FIELD for a struct or union field; make a FIELD_DECL.
INITIALIZED is true if the decl has an initializer.
WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node
representing the width of the bit-field.
DECL_ATTRS points to the list of attributes that should be added to this
decl. Any nested attributes that belong on the decl itself will be
added to this list.
If EXPR is not NULL, any expressions that need to be evaluated as
part of evaluating variably modified types will be stored in *EXPR.
If EXPR_CONST_OPERANDS is not NULL, *EXPR_CONST_OPERANDS will be
set to indicate whether operands in *EXPR can be used in constant
expressions.
DEPRECATED_STATE is a deprecated_states value indicating whether
deprecation warnings should be suppressed.
In the TYPENAME case, DECLARATOR is really an absolute declarator.
It may also be so in the PARM case, for a prototype where the
argument type is specified but not the name.
This function is where the complicated C meanings of `static'
and `extern' are interpreted. */
static tree
grokdeclarator (const struct c_declarator *declarator,
struct c_declspecs *declspecs,
enum decl_context decl_context, bool initialized, tree *width,
tree *decl_attrs, tree *expr, bool *expr_const_operands,
enum deprecated_states deprecated_state)
{
tree type = declspecs->type;
bool threadp = declspecs->thread_p;
enum c_storage_class storage_class = declspecs->storage_class;
int constp;
int restrictp;
int volatilep;
int atomicp;
int type_quals = TYPE_UNQUALIFIED;
tree name = NULL_TREE;
bool funcdef_flag = false;
bool funcdef_syntax = false;
bool size_varies = false;
tree decl_attr = declspecs->decl_attr;
int array_ptr_quals = TYPE_UNQUALIFIED;
tree array_ptr_attrs = NULL_TREE;
int array_parm_static = 0;
bool array_parm_vla_unspec_p = false;
tree returned_attrs = NULL_TREE;
bool bitfield = width != NULL;
tree element_type;
struct c_arg_info *arg_info = 0;
addr_space_t as1, as2, address_space;
location_t loc = UNKNOWN_LOCATION;
const char *errmsg;
tree expr_dummy;
bool expr_const_operands_dummy;
enum c_declarator_kind first_non_attr_kind;
unsigned int alignas_align = 0;
if (TREE_CODE (type) == ERROR_MARK)
return error_mark_node;
if (expr == NULL)
expr = &expr_dummy;
if (expr_const_operands == NULL)
expr_const_operands = &expr_const_operands_dummy;
*expr = declspecs->expr;
*expr_const_operands = declspecs->expr_const_operands;
if (decl_context == FUNCDEF)
funcdef_flag = true, decl_context = NORMAL;
/* Look inside a declarator for the name being declared
and get it as an IDENTIFIER_NODE, for an error message. */
{
const struct c_declarator *decl = declarator;
first_non_attr_kind = cdk_attrs;
while (decl)
switch (decl->kind)
{
case cdk_array:
loc = decl->id_loc;
/* FALL THRU. */
case cdk_function:
case cdk_pointer:
funcdef_syntax = (decl->kind == cdk_function);
decl = decl->declarator;
if (first_non_attr_kind == cdk_attrs)
first_non_attr_kind = decl->kind;
break;
case cdk_attrs:
decl = decl->declarator;
break;
case cdk_id:
loc = decl->id_loc;
if (decl->u.id)
name = decl->u.id;
if (first_non_attr_kind == cdk_attrs)
first_non_attr_kind = decl->kind;
decl = 0;
break;
default:
gcc_unreachable ();
}
if (name == 0)
{
gcc_assert (decl_context == PARM
|| decl_context == TYPENAME
|| (decl_context == FIELD
&& declarator->kind == cdk_id));
gcc_assert (!initialized);
}
}
/* A function definition's declarator must have the form of
a function declarator. */
if (funcdef_flag && !funcdef_syntax)
return 0;
/* If this looks like a function definition, make it one,
even if it occurs where parms are expected.
Then store_parm_decls will reject it and not use it as a parm. */
if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag)
decl_context = PARM;
if (declspecs->deprecated_p && deprecated_state != DEPRECATED_SUPPRESS)
warn_deprecated_use (declspecs->type, declspecs->decl_attr);
if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope
&& variably_modified_type_p (type, NULL_TREE))
{
if (name)
error_at (loc, "variably modified %qE at file scope", name);
else
error_at (loc, "variably modified field at file scope");
type = integer_type_node;
}
size_varies = C_TYPE_VARIABLE_SIZE (type) != 0;
/* Diagnose defaulting to "int". */
if (declspecs->default_int_p && !in_system_header_at (input_location))
{
/* Issue a warning if this is an ISO C 99 program or if
-Wreturn-type and this is a function, or if -Wimplicit;
prefer the former warning since it is more explicit. */
if ((warn_implicit_int || warn_return_type || flag_isoc99)
&& funcdef_flag)
warn_about_return_type = 1;
else
{
if (name)
warn_defaults_to (loc, OPT_Wimplicit_int,
"type defaults to %<int%> in declaration "
"of %qE", name);
else
warn_defaults_to (loc, OPT_Wimplicit_int,
"type defaults to %<int%> in type name");
}
}
/* Adjust the type if a bit-field is being declared,
-funsigned-bitfields is in effect, and the type is not explicitly
"signed". */
if (bitfield && !flag_signed_bitfields && !declspecs->explicit_signed_p
&& TREE_CODE (type) == INTEGER_TYPE)
type = unsigned_type_for (type);
/* Figure out the type qualifiers for the declaration. There are
two ways a declaration can become qualified. One is something
like `const int i' where the `const' is explicit. Another is
something like `typedef const int CI; CI i' where the type of the
declaration contains the `const'. A third possibility is that
there is a type qualifier on the element type of a typedefed
array type, in which case we should extract that qualifier so
that c_apply_type_quals_to_decl receives the full list of
qualifiers to work with (C90 is not entirely clear about whether
duplicate qualifiers should be diagnosed in this case, but it
seems most appropriate to do so). */
element_type = strip_array_types (type);
constp = declspecs->const_p + TYPE_READONLY (element_type);
restrictp = declspecs->restrict_p + TYPE_RESTRICT (element_type);
volatilep = declspecs->volatile_p + TYPE_VOLATILE (element_type);
atomicp = declspecs->atomic_p + TYPE_ATOMIC (element_type);
as1 = declspecs->address_space;
as2 = TYPE_ADDR_SPACE (element_type);
address_space = ADDR_SPACE_GENERIC_P (as1) ? as2 : as1;
if (constp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<const%>");
if (restrictp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<restrict%>");
if (volatilep > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<volatile%>");
if (atomicp > 1)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<_Atomic%>");
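/* A user-level sketch of how a qualifier can arrive twice (hypothetical
   example): once in the declaration specifiers and once through the
   typedef'ed (element) type.

       typedef const int CI;
       const CI x = 0;   // pedwarn: duplicate 'const'
*/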
if (!ADDR_SPACE_GENERIC_P (as1) && !ADDR_SPACE_GENERIC_P (as2) && as1 != as2)
error_at (loc, "conflicting named address spaces (%s vs %s)",
c_addr_space_name (as1), c_addr_space_name (as2));
if ((TREE_CODE (type) == ARRAY_TYPE
|| first_non_attr_kind == cdk_array)
&& TYPE_QUALS (element_type))
type = TYPE_MAIN_VARIANT (type);
type_quals = ((constp ? TYPE_QUAL_CONST : 0)
| (restrictp ? TYPE_QUAL_RESTRICT : 0)
| (volatilep ? TYPE_QUAL_VOLATILE : 0)
| (atomicp ? TYPE_QUAL_ATOMIC : 0)
| ENCODE_QUAL_ADDR_SPACE (address_space));
/* Applying the _Atomic qualifier to an array type (through the use
of typedefs or typeof) must be detected here. If the qualifier
is introduced later, any appearance of applying it to an array is
actually applying it to an element of that array. */
if (atomicp && TREE_CODE (type) == ARRAY_TYPE)
error_at (loc, "%<_Atomic%>-qualified array type");
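/* A user-level sketch (hypothetical example): the qualifier can only
   reach an array type as a whole through a typedef or typeof.

       typedef int A[3];
       _Atomic A a;       // error: '_Atomic'-qualified array type
       _Atomic int b[3];  // OK: qualifies the element type instead
*/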
/* Warn about storage classes that are invalid for certain
kinds of declarations (parameters, typenames, etc.). */
if (funcdef_flag
&& (threadp
|| storage_class == csc_auto
|| storage_class == csc_register
|| storage_class == csc_typedef))
{
if (storage_class == csc_auto)
pedwarn (loc,
(current_scope == file_scope) ? 0 : OPT_Wpedantic,
"function definition declared %<auto%>");
if (storage_class == csc_register)
error_at (loc, "function definition declared %<register%>");
if (storage_class == csc_typedef)
error_at (loc, "function definition declared %<typedef%>");
if (threadp)
error_at (loc, "function definition declared %qs",
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
threadp = false;
if (storage_class == csc_auto
|| storage_class == csc_register
|| storage_class == csc_typedef)
storage_class = csc_none;
}
else if (decl_context != NORMAL && (storage_class != csc_none || threadp))
{
if (decl_context == PARM && storage_class == csc_register)
;
else
{
switch (decl_context)
{
case FIELD:
if (name)
error_at (loc, "storage class specified for structure "
"field %qE", name);
else
error_at (loc, "storage class specified for structure field");
break;
case PARM:
if (name)
error_at (loc, "storage class specified for parameter %qE",
name);
else
error_at (loc, "storage class specified for unnamed parameter");
break;
default:
error_at (loc, "storage class specified for typename");
break;
}
storage_class = csc_none;
threadp = false;
}
}
else if (storage_class == csc_extern
&& initialized
&& !funcdef_flag)
{
/* 'extern' with initialization is invalid if not at file scope. */
if (current_scope == file_scope)
{
/* It is fine to have 'extern const' when compiling at
the intersection of C and C++. */
if (!(warn_cxx_compat && constp))
warning_at (loc, 0, "%qE initialized and declared %<extern%>",
name);
}
else
error_at (loc, "%qE has both %<extern%> and initializer", name);
}
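/* A user-level sketch of the two cases above (hypothetical example):

       extern int x = 1;     // file scope: warning, 'x' initialized and
                             //   declared 'extern' (it is a definition)
       void
       f (void)
       {
         extern int y = 2;   // error: 'y' has both 'extern' and initializer
       }
*/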
else if (current_scope == file_scope)
{
if (storage_class == csc_auto)
error_at (loc, "file-scope declaration of %qE specifies %<auto%>",
name);
if (pedantic && storage_class == csc_register)
pedwarn (input_location, OPT_Wpedantic,
"file-scope declaration of %qE specifies %<register%>", name);
}
else
{
if (storage_class == csc_extern && funcdef_flag)
error_at (loc, "nested function %qE declared %<extern%>", name);
else if (threadp && storage_class == csc_none)
{
error_at (loc, "function-scope %qE implicitly auto and declared "
"%qs", name,
declspecs->thread_gnu_p ? "__thread" : "_Thread_local");
threadp = false;
}
}
/* Now figure out the structure of the declarator proper.
Descend through it, creating more complex types, until we reach
the declared identifier (or NULL_TREE, in an absolute declarator).
At each stage we maintain an unqualified version of the type
together with any qualifiers that should be applied to it with
c_build_qualified_type; this way, array types including
multidimensional array types are first built up in unqualified
form and then the qualified form is created with
TYPE_MAIN_VARIANT pointing to the unqualified form. */
while (declarator && declarator->kind != cdk_id)
{
if (type == error_mark_node)
{
declarator = declarator->declarator;
continue;
}
/* Each level of DECLARATOR is either a cdk_array (for ...[..]),
a cdk_pointer (for *...),
a cdk_function (for ...(...)),
a cdk_attrs (for nested attributes),
or a cdk_id (for the name being declared
or the place in an absolute declarator
where the name was omitted).
For the last case, we have just exited the loop.
At this point, TYPE is the type of elements of an array,
or for a function to return, or for a pointer to point to.
After this sequence of ifs, TYPE is the type of the
array or function or pointer, and DECLARATOR has had its
outermost layer removed. */
if (array_ptr_quals != TYPE_UNQUALIFIED
|| array_ptr_attrs != NULL_TREE
|| array_parm_static)
{
/* Only the innermost declarator (making a parameter be of
array type which is converted to pointer type)
may have static or type qualifiers. */
error_at (loc, "static or type qualifiers in non-parameter array declarator");
array_ptr_quals = TYPE_UNQUALIFIED;
array_ptr_attrs = NULL_TREE;
array_parm_static = 0;
}
switch (declarator->kind)
{
case cdk_attrs:
{
/* A declarator with embedded attributes. */
tree attrs = declarator->u.attrs;
const struct c_declarator *inner_decl;
int attr_flags = 0;
declarator = declarator->declarator;
inner_decl = declarator;
while (inner_decl->kind == cdk_attrs)
inner_decl = inner_decl->declarator;
if (inner_decl->kind == cdk_id)
attr_flags |= (int) ATTR_FLAG_DECL_NEXT;
else if (inner_decl->kind == cdk_function)
attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT;
else if (inner_decl->kind == cdk_array)
attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT;
returned_attrs = decl_attributes (&type,
chainon (returned_attrs, attrs),
attr_flags);
break;
}
case cdk_array:
{
tree itype = NULL_TREE;
tree size = declarator->u.array.dimen;
/* The index is a signed object `sizetype' bits wide. */
tree index_type = c_common_signed_type (sizetype);
array_ptr_quals = declarator->u.array.quals;
array_ptr_attrs = declarator->u.array.attrs;
array_parm_static = declarator->u.array.static_p;
array_parm_vla_unspec_p = declarator->u.array.vla_unspec_p;
declarator = declarator->declarator;
/* Check for some types that there cannot be arrays of. */
if (VOID_TYPE_P (type))
{
if (name)
error_at (loc, "declaration of %qE as array of voids", name);
else
error_at (loc, "declaration of type name as array of voids");
type = error_mark_node;
}
if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (name)
error_at (loc, "declaration of %qE as array of functions",
name);
else
error_at (loc, "declaration of type name as array of "
"functions");
type = error_mark_node;
}
if (pedantic && !in_system_header_at (input_location)
&& flexible_array_type_p (type))
pedwarn (loc, OPT_Wpedantic,
"invalid use of structure with flexible array member");
if (size == error_mark_node)
type = error_mark_node;
if (type == error_mark_node)
continue;
/* If size was specified, set ITYPE to a range-type for
that size. Otherwise, ITYPE remains null. finish_decl
may figure it out from an initial value. */
if (size)
{
bool size_maybe_const = true;
bool size_int_const = (TREE_CODE (size) == INTEGER_CST
&& !TREE_OVERFLOW (size));
bool this_size_varies = false;
/* Strip NON_LVALUE_EXPRs since we aren't using as an
lvalue. */
STRIP_TYPE_NOPS (size);
if (!INTEGRAL_TYPE_P (TREE_TYPE (size)))
{
if (name)
error_at (loc, "size of array %qE has non-integer type",
name);
else
error_at (loc,
"size of unnamed array has non-integer type");
size = integer_one_node;
}
size = c_fully_fold (size, false, &size_maybe_const);
if (pedantic && size_maybe_const && integer_zerop (size))
{
if (name)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids zero-size array %qE", name);
else
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids zero-size array");
}
if (TREE_CODE (size) == INTEGER_CST && size_maybe_const)
{
constant_expression_warning (size);
if (tree_int_cst_sgn (size) < 0)
{
if (name)
error_at (loc, "size of array %qE is negative", name);
else
error_at (loc, "size of unnamed array is negative");
size = integer_one_node;
}
/* Handle a size folded to an integer constant but
not an integer constant expression. */
if (!size_int_const)
{
/* If this is a file scope declaration of an
ordinary identifier, this is invalid code;
diagnosing it here and not subsequently
treating the type as variable-length avoids
more confusing diagnostics later. */
if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope)
pedwarn (input_location, 0,
"variably modified %qE at file scope",
name);
else
this_size_varies = size_varies = true;
warn_variable_length_array (name, size);
}
}
else if ((decl_context == NORMAL || decl_context == FIELD)
&& current_scope == file_scope)
{
error_at (loc, "variably modified %qE at file scope", name);
size = integer_one_node;
}
else
{
/* Make sure the array size remains visibly
nonconstant even if it is (eg) a const variable
with known value. */
this_size_varies = size_varies = true;
warn_variable_length_array (name, size);
if (flag_sanitize & SANITIZE_VLA
&& decl_context == NORMAL
&& do_ubsan_in_current_function ())
{
/* Evaluate the array size only once. */
size = c_save_expr (size);
size = c_fully_fold (size, false, NULL);
size = fold_build2 (COMPOUND_EXPR, TREE_TYPE (size),
ubsan_instrument_vla (loc, size),
size);
}
}
if (integer_zerop (size) && !this_size_varies)
{
/* A zero-length array cannot be represented with
an unsigned index type, which is what we'll
get with build_index_type. Create an
open-ended range instead. */
itype = build_range_type (sizetype, size, NULL_TREE);
}
else
{
/* Arrange for the SAVE_EXPR on the inside of the
MINUS_EXPR, which allows the -1 to get folded
with the +1 that happens when building TYPE_SIZE. */
if (size_varies)
size = save_expr (size);
if (this_size_varies && TREE_CODE (size) == INTEGER_CST)
size = build2 (COMPOUND_EXPR, TREE_TYPE (size),
integer_zero_node, size);
/* Compute the maximum valid index, that is, size
- 1. Do the calculation in index_type, so that
if it is a variable the computations will be
done in the proper mode. */
itype = fold_build2_loc (loc, MINUS_EXPR, index_type,
convert (index_type, size),
convert (index_type,
size_one_node));
/* The above overflows when size does not fit
in index_type.
??? While a size of INT_MAX+1 technically shouldn't
cause an overflow (because we subtract 1), handling
this case seems like an unnecessary complication. */
if (TREE_CODE (size) == INTEGER_CST
&& !int_fits_type_p (size, index_type))
{
if (name)
error_at (loc, "size of array %qE is too large",
name);
else
error_at (loc, "size of unnamed array is too large");
type = error_mark_node;
continue;
}
itype = build_index_type (itype);
}
if (this_size_varies)
{
if (*expr)
*expr = build2 (COMPOUND_EXPR, TREE_TYPE (size),
*expr, size);
else
*expr = size;
*expr_const_operands &= size_maybe_const;
}
}
else if (decl_context == FIELD)
{
bool flexible_array_member = false;
if (array_parm_vla_unspec_p)
/* Field names can in fact have function prototype
scope (for a struct defined inside a parameter list),
so [*] is disallowed here by making the field
variably modified, not by treating it as something
other than a declaration with function prototype
scope. */
size_varies = true;
else
{
const struct c_declarator *t = declarator;
while (t->kind == cdk_attrs)
t = t->declarator;
flexible_array_member = (t->kind == cdk_id);
}
if (flexible_array_member
&& !in_system_header_at (input_location))
pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not "
"support flexible array members");
/* ISO C99 flexible array members are effectively
identical to GCC's zero-length array extension. */
if (flexible_array_member || array_parm_vla_unspec_p)
itype = build_range_type (sizetype, size_zero_node,
NULL_TREE);
}
else if (decl_context == PARM)
{
if (array_parm_vla_unspec_p)
{
itype = build_range_type (sizetype, size_zero_node, NULL_TREE);
size_varies = true;
}
}
else if (decl_context == TYPENAME)
{
if (array_parm_vla_unspec_p)
{
/* C99 6.7.5.2p4 */
warning (0, "%<[*]%> not in a declaration");
/* We use this to avoid interfering with incomplete
array types of the same element type, which would
otherwise be modified below. */
itype = build_range_type (sizetype, size_zero_node,
NULL_TREE);
size_varies = true;
}
}
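/* A user-level sketch of the '[*]' contexts handled above (hypothetical
   example): C99 6.7.5.2p4 permits '[*]' only in declarations with
   function prototype scope; a bare type name (in a cast or sizeof)
   draws the "not in a declaration" warning instead.

       void f (int, int a[*]);   // OK: parameter in a prototype
       void g (int a[*]) { }     // error, diagnosed in grokparms
*/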
/* Complain about arrays of incomplete types. */
if (!COMPLETE_TYPE_P (type))
{
error_at (loc, "array type has incomplete element type %qT",
type);
type = error_mark_node;
}
else
/* When itype is NULL, a shared incomplete array type is
returned for all arrays of a given type. Elsewhere we
make sure we don't complete that type before copying
it, but here we want to make sure we don't ever
modify the shared type, so we gcc_assert (itype)
below. */
{
addr_space_t as = DECODE_QUAL_ADDR_SPACE (type_quals);
if (!ADDR_SPACE_GENERIC_P (as) && as != TYPE_ADDR_SPACE (type))
type = build_qualified_type (type,
ENCODE_QUAL_ADDR_SPACE (as));
type = build_array_type (type, itype);
}
if (type != error_mark_node)
{
if (size_varies)
{
/* It is ok to modify type here even if itype is
NULL: if size_varies, we're in a
multi-dimensional array and the inner type has
variable size, so the enclosing shared array type
must too. */
if (size && TREE_CODE (size) == INTEGER_CST)
type
= build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
C_TYPE_VARIABLE_SIZE (type) = 1;
}
/* The GCC extension for zero-length arrays differs from
ISO flexible array members in that sizeof yields
zero. */
if (size && integer_zerop (size))
{
gcc_assert (itype);
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_SIZE (type) = bitsize_zero_node;
TYPE_SIZE_UNIT (type) = size_zero_node;
SET_TYPE_STRUCTURAL_EQUALITY (type);
}
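/* A user-level sketch of the difference noted above (hypothetical
   example):

       struct z { int n; int tail[0]; };  // GCC extension: 'tail' has size 0
       struct f { int n; int tail[]; };   // C99 FAM: 'tail' is incomplete,
                                          //   so sizeof is rejected
*/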
if (array_parm_vla_unspec_p)
{
gcc_assert (itype);
/* The type is complete. C99 6.7.5.2p4 */
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_SIZE (type) = bitsize_zero_node;
TYPE_SIZE_UNIT (type) = size_zero_node;
SET_TYPE_STRUCTURAL_EQUALITY (type);
}
}
if (decl_context != PARM
&& (array_ptr_quals != TYPE_UNQUALIFIED
|| array_ptr_attrs != NULL_TREE
|| array_parm_static))
{
error_at (loc, "static or type qualifiers in non-parameter array declarator");
array_ptr_quals = TYPE_UNQUALIFIED;
array_ptr_attrs = NULL_TREE;
array_parm_static = 0;
}
break;
}
case cdk_function:
{
/* Say it's a definition only for the declarator closest
to the identifier, apart possibly from some
attributes. */
bool really_funcdef = false;
tree arg_types;
if (funcdef_flag)
{
const struct c_declarator *t = declarator->declarator;
while (t->kind == cdk_attrs)
t = t->declarator;
really_funcdef = (t->kind == cdk_id);
}
/* Declaring a function type. Make sure we have a valid
type for the function to return. */
if (type == error_mark_node)
continue;
size_varies = false;
/* Warn about some types functions can't return. */
if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (name)
error_at (loc, "%qE declared as function returning a "
"function", name);
else
error_at (loc, "type name declared as function "
"returning a function");
type = integer_type_node;
}
if (TREE_CODE (type) == ARRAY_TYPE)
{
if (name)
error_at (loc, "%qE declared as function returning an array",
name);
else
error_at (loc, "type name declared as function returning "
"an array");
type = integer_type_node;
}
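/* A user-level sketch of the return-type errors above (hypothetical
   example):

       int f (void)(void);     // error: function returning a function
       int g (void)[3];        // error: function returning an array
       int (*h (void))[3];     // OK: function returning pointer to array
*/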
errmsg = targetm.invalid_return_type (type);
if (errmsg)
{
error (errmsg);
type = integer_type_node;
}
/* Construct the function type and go to the next
inner layer of declarator. */
arg_info = declarator->u.arg_info;
arg_types = grokparms (arg_info, really_funcdef);
/* Type qualifiers before the return type of the function
qualify the return type, not the function type. */
if (type_quals)
{
/* Type qualifiers on a function return type are
normally permitted by the standard but have no
effect, so give a warning at -Wreturn-type.
Qualifiers on a void return type are banned on
function definitions in ISO C; GCC used to use
them for noreturn functions. */
if (VOID_TYPE_P (type) && really_funcdef)
pedwarn (loc, 0,
"function definition has qualified void return type");
else
warning_at (loc, OPT_Wignored_qualifiers,
"type qualifiers ignored on function return type");
type = c_build_qualified_type (type, type_quals);
}
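/* A user-level sketch of the return-type qualifier handling above
   (hypothetical example):

       const int f (void);         // -Wignored-qualifiers: qualifiers
                                   //   ignored on function return type
       volatile void g (void) { }  // pedwarn: definition has qualified
                                   //   void return type
*/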
type_quals = TYPE_UNQUALIFIED;
type = build_function_type (type, arg_types);
declarator = declarator->declarator;
/* Set the TYPE_CONTEXTs for each tagged type which is local to
the formal parameter list of this FUNCTION_TYPE to point to
the FUNCTION_TYPE node itself. */
{
c_arg_tag *tag;
unsigned ix;
FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
TYPE_CONTEXT (tag->type) = type;
}
break;
}
case cdk_pointer:
{
/* Merge any constancy or volatility into the target type
for the pointer. */
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals);
size_varies = false;
/* When the pointed-to type involves components of variable size,
care must be taken to ensure that the size evaluation code is
emitted early enough to dominate all the possible later uses
and late enough for the variables on which it depends to have
been assigned.
This is expected to happen automatically when the pointed-to
type has a name/declaration of its own, but special attention
is required if the type is anonymous.
We handle the NORMAL and FIELD contexts here by attaching an
artificial TYPE_DECL to such pointed-to type. This forces the
sizes evaluation at a safe point and ensures it is not deferred
until e.g. within a deeper conditional context.
We expect nothing to be needed here for PARM or TYPENAME.
Pushing a TYPE_DECL at this point for TYPENAME would actually
be incorrect, as we might be in the middle of an expression
with side effects on the pointed-to type size "arguments" prior
to the pointer declaration point and the fake TYPE_DECL in the
enclosing context would force the size evaluation prior to the
side effects. */
if (!TYPE_NAME (type)
&& (decl_context == NORMAL || decl_context == FIELD)
&& variably_modified_type_p (type, NULL_TREE))
{
tree decl = build_decl (loc, TYPE_DECL, NULL_TREE, type);
DECL_ARTIFICIAL (decl) = 1;
pushdecl (decl);
finish_decl (decl, loc, NULL_TREE, NULL_TREE, NULL_TREE);
TYPE_NAME (type) = decl;
}
type = c_build_pointer_type (type);
/* Process type qualifiers (such as const or volatile)
that were given inside the `*'. */
type_quals = declarator->u.pointer_quals;
declarator = declarator->declarator;
break;
}
default:
gcc_unreachable ();
}
}
*decl_attrs = chainon (returned_attrs, *decl_attrs);
/* Now TYPE has the actual type, apart from any qualifiers in
TYPE_QUALS. */
/* Warn about address space used for things other than static memory or
pointers. */
address_space = DECODE_QUAL_ADDR_SPACE (type_quals);
if (!ADDR_SPACE_GENERIC_P (address_space))
{
if (decl_context == NORMAL)
{
switch (storage_class)
{
case csc_auto:
error ("%qs combined with %<auto%> qualifier for %qE",
c_addr_space_name (address_space), name);
break;
case csc_register:
error ("%qs combined with %<register%> qualifier for %qE",
c_addr_space_name (address_space), name);
break;
case csc_none:
if (current_function_scope)
{
error ("%qs specified for auto variable %qE",
c_addr_space_name (address_space), name);
break;
}
break;
case csc_static:
case csc_extern:
case csc_typedef:
break;
default:
gcc_unreachable ();
}
}
else if (decl_context == PARM && TREE_CODE (type) != ARRAY_TYPE)
{
if (name)
error ("%qs specified for parameter %qE",
c_addr_space_name (address_space), name);
else
error ("%qs specified for unnamed parameter",
c_addr_space_name (address_space));
}
else if (decl_context == FIELD)
{
if (name)
error ("%qs specified for structure field %qE",
c_addr_space_name (address_space), name);
else
error ("%qs specified for structure field",
c_addr_space_name (address_space));
}
}
/* Check the type and width of a bit-field. */
if (bitfield)
{
check_bitfield_type_and_width (&type, width, name);
/* C11 makes it implementation-defined (6.7.2.1#5) whether
atomic types are permitted for bit-fields; we have no code to
make bit-field accesses atomic, so disallow them. */
if (type_quals & TYPE_QUAL_ATOMIC)
{
if (name)
error ("bit-field %qE has atomic type", name);
else
error ("bit-field has atomic type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
}
/* Reject invalid uses of _Alignas. */
if (declspecs->alignas_p)
{
if (storage_class == csc_typedef)
error_at (loc, "alignment specified for typedef %qE", name);
else if (storage_class == csc_register)
error_at (loc, "alignment specified for %<register%> object %qE",
name);
else if (decl_context == PARM)
{
if (name)
error_at (loc, "alignment specified for parameter %qE", name);
else
error_at (loc, "alignment specified for unnamed parameter");
}
else if (bitfield)
{
if (name)
error_at (loc, "alignment specified for bit-field %qE", name);
else
error_at (loc, "alignment specified for unnamed bit-field");
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
error_at (loc, "alignment specified for function %qE", name);
else if (declspecs->align_log != -1)
{
alignas_align = 1U << declspecs->align_log;
if (alignas_align < min_align_of_type (type))
{
if (name)
error_at (loc, "%<_Alignas%> specifiers cannot reduce "
"alignment of %qE", name);
else
error_at (loc, "%<_Alignas%> specifiers cannot reduce "
"alignment of unnamed field");
alignas_align = 0;
}
}
}
/* Did array size calculations overflow or does the array cover more
than half of the address-space? */
if (TREE_CODE (type) == ARRAY_TYPE
&& COMPLETE_TYPE_P (type)
&& TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST
&& ! valid_constant_size_p (TYPE_SIZE_UNIT (type)))
{
if (name)
error_at (loc, "size of array %qE is too large", name);
else
error_at (loc, "size of unnamed array is too large");
/* If we proceed with the array type as it is, we'll eventually
crash in tree_to_[su]hwi(). */
type = error_mark_node;
}
/* If this is declaring a typedef name, return a TYPE_DECL. */
if (storage_class == csc_typedef)
{
tree decl;
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals);
decl = build_decl (declarator->id_loc,
TYPE_DECL, declarator->u.id, type);
if (declspecs->explicit_signed_p)
C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1;
if (declspecs->inline_p)
pedwarn (loc, 0, "typedef %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "typedef %q+D declared %<_Noreturn%>", decl);
if (warn_cxx_compat && declarator->u.id != NULL_TREE)
{
struct c_binding *b = I_TAG_BINDING (declarator->u.id);
if (b != NULL
&& b->decl != NULL_TREE
&& (B_IN_CURRENT_SCOPE (b)
|| (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b)))
&& TYPE_MAIN_VARIANT (b->decl) != TYPE_MAIN_VARIANT (type))
{
warning_at (declarator->id_loc, OPT_Wc___compat,
("using %qD as both a typedef and a tag is "
"invalid in C++"),
decl);
if (b->locus != UNKNOWN_LOCATION)
inform (b->locus, "originally defined here");
}
}
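/* A user-level sketch of the -Wc++-compat warning above (hypothetical
   example): C keeps tags and typedefs in separate name spaces, C++
   does not.

       struct foo { int i; };
       typedef int foo;          // valid C; invalid in C++
       typedef struct bar bar;   // OK in both: same underlying type
*/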
return decl;
}
/* If this is a type name (such as, in a cast or sizeof),
compute the type and return it now. */
if (decl_context == TYPENAME)
{
/* Note that the grammar rejects storage classes in typenames
and fields. */
gcc_assert (storage_class == csc_none && !threadp
&& !declspecs->inline_p && !declspecs->noreturn_p);
if ((type_quals & TYPE_QUAL_ATOMIC)
&& TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE
&& type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids const or volatile function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals);
return type;
}
if (pedantic && decl_context == FIELD
&& variably_modified_type_p (type, NULL_TREE))
{
/* C99 6.7.2.1p8 */
pedwarn (loc, OPT_Wpedantic, "a member of a structure or union cannot "
"have a variably modified type");
}
/* Aside from typedefs and type names (handled above),
`void' at top level (not within pointer)
is allowed only in public variables.
We don't complain about parms either, but that is because
a better error message can be made later. */
if (VOID_TYPE_P (type) && decl_context != PARM
&& !((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE)
&& (storage_class == csc_extern
|| (current_scope == file_scope
&& !(storage_class == csc_static
|| storage_class == csc_register)))))
{
error_at (loc, "variable or field %qE declared void", name);
type = integer_type_node;
}
/* Now create the decl, which may be a VAR_DECL, a PARM_DECL
or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */
{
tree decl;
if (decl_context == PARM)
{
tree promoted_type;
bool array_parameter_p = false;
/* A parameter declared as an array of T is really a pointer to T.
One declared as a function is really a pointer to a function. */
if (TREE_CODE (type) == ARRAY_TYPE)
{
/* Transfer const-ness of array into that of type pointed to. */
type = TREE_TYPE (type);
if (type_quals)
type = c_build_qualified_type (type, type_quals);
type = c_build_pointer_type (type);
type_quals = array_ptr_quals;
if (type_quals)
type = c_build_qualified_type (type, type_quals);
/* We don't yet implement attributes in this context. */
if (array_ptr_attrs != NULL_TREE)
warning_at (loc, OPT_Wattributes,
"attributes in parameter array declarator ignored");
size_varies = false;
array_parameter_p = true;
}
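/* A user-level sketch of the parameter adjustment above (hypothetical
   example, per C99 6.7.5.3p7): array of T becomes pointer to T, and
   qualifiers written inside '[]' apply to the pointer itself.

       void f (const int a[10]);    // becomes: void f (const int *a);
       void g (int a[const 10]);    // becomes: void g (int *const a);
*/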
else if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (type_quals & TYPE_QUAL_ATOMIC)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (type_quals)
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
if (type_quals)
type = c_build_qualified_type (type, type_quals);
type = c_build_pointer_type (type);
type_quals = TYPE_UNQUALIFIED;
}
else if (type_quals)
type = c_build_qualified_type (type, type_quals);
decl = build_decl (declarator->id_loc,
PARM_DECL, declarator->u.id, type);
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
C_ARRAY_PARAMETER (decl) = array_parameter_p;
/* Compute the type actually passed in the parmlist,
for the case where there is no prototype.
(For example, shorts and chars are passed as ints.)
When there is a prototype, this is overridden later. */
if (type == error_mark_node)
promoted_type = type;
else
promoted_type = c_type_promotes_to (type);
DECL_ARG_TYPE (decl) = promoted_type;
if (declspecs->inline_p)
pedwarn (loc, 0, "parameter %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "parameter %q+D declared %<_Noreturn%>", decl);
}
else if (decl_context == FIELD)
{
/* Note that the grammar rejects storage classes in typenames
and fields. */
gcc_assert (storage_class == csc_none && !threadp
&& !declspecs->inline_p && !declspecs->noreturn_p);
/* Structure field. It may not be a function. */
if (TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc, "field %qE declared as a function", name);
type = build_pointer_type (type);
}
else if (TREE_CODE (type) != ERROR_MARK
&& !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type))
{
if (name)
error_at (loc, "field %qE has incomplete type", name);
else
error_at (loc, "unnamed field has incomplete type");
type = error_mark_node;
}
else if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type) == NULL_TREE)
{
/* We have a flexible array member through a typedef.
Set suitable range. Whether this is a correct position
for a flexible array member will be determined elsewhere. */
if (!in_system_header_at (input_location))
pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not "
"support flexible array members");
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_DOMAIN (type) = build_range_type (sizetype, size_zero_node,
NULL_TREE);
}
type = c_build_qualified_type (type, type_quals);
decl = build_decl (declarator->id_loc,
FIELD_DECL, declarator->u.id, type);
DECL_NONADDRESSABLE_P (decl) = bitfield;
if (bitfield && !declarator->u.id)
TREE_NO_WARNING (decl) = 1;
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
}
else if (TREE_CODE (type) == FUNCTION_TYPE)
{
if (storage_class == csc_register || threadp)
{
error_at (loc, "invalid storage class for function %qE", name);
}
else if (current_scope != file_scope)
{
/* Function declaration not at file scope. Storage
classes other than `extern' are not allowed, C99
6.7.1p5, and `extern' makes no difference. However,
GCC allows 'auto', perhaps with 'inline', to support
nested functions. */
if (storage_class == csc_auto)
pedwarn (loc, OPT_Wpedantic,
"invalid storage class for function %qE", name);
else if (storage_class == csc_static)
{
error_at (loc, "invalid storage class for function %qE", name);
if (funcdef_flag)
storage_class = declspecs->storage_class = csc_none;
else
return 0;
}
}
decl = build_decl (declarator->id_loc,
FUNCTION_DECL, declarator->u.id, type);
decl = build_decl_attribute_variant (decl, decl_attr);
if (type_quals & TYPE_QUAL_ATOMIC)
{
error_at (loc,
"%<_Atomic%>-qualified function type");
type_quals &= ~TYPE_QUAL_ATOMIC;
}
else if (pedantic && type_quals && !DECL_IN_SYSTEM_HEADER (decl))
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids qualified function types");
/* Every function declaration is an external reference
(DECL_EXTERNAL) except for those which are not at file
scope and are explicitly declared "auto". This is
forbidden by standard C (C99 6.7.1p5) and is interpreted by
GCC to signify a forward declaration of a nested function. */
if (storage_class == csc_auto && current_scope != file_scope)
DECL_EXTERNAL (decl) = 0;
/* In C99, a function which is declared 'inline' with 'extern'
is not an external reference (which is confusing). It
means that the later definition of the function must be output
in this file, C99 6.7.4p6. In GNU C89, a function declared
'extern inline' is an external reference. */
else if (declspecs->inline_p && storage_class != csc_static)
DECL_EXTERNAL (decl) = ((storage_class == csc_extern)
== flag_gnu89_inline);
else
DECL_EXTERNAL (decl) = !initialized;
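/* A user-level sketch of the 'extern inline' logic above (hypothetical
   example):

       extern inline int f (void) { return 0; }
       // C99: this translation unit must provide the external
       //   definition, so DECL_EXTERNAL ends up 0.
       // GNU C89 (-fgnu89-inline): the external definition must live
       //   elsewhere, so DECL_EXTERNAL ends up 1.
*/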
/* Record absence of global scope for `static' or `auto'. */
TREE_PUBLIC (decl)
= !(storage_class == csc_static || storage_class == csc_auto);
/* For a function definition, record the argument information
block where store_parm_decls will look for it. */
if (funcdef_flag)
current_function_arg_info = arg_info;
if (declspecs->default_int_p)
C_FUNCTION_IMPLICIT_INT (decl) = 1;
/* Record presence of `inline' and `_Noreturn', if it is
reasonable. */
if (flag_hosted && MAIN_NAME_P (declarator->u.id))
{
if (declspecs->inline_p)
pedwarn (loc, 0, "cannot inline function %<main%>");
if (declspecs->noreturn_p)
pedwarn (loc, 0, "%<main%> declared %<_Noreturn%>");
}
else
{
if (declspecs->inline_p)
/* Record that the function is declared `inline'. */
DECL_DECLARED_INLINE_P (decl) = 1;
if (declspecs->noreturn_p)
{
if (flag_isoc99)
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C99 does not support %<_Noreturn%>");
else
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C90 does not support %<_Noreturn%>");
TREE_THIS_VOLATILE (decl) = 1;
}
}
}
else
{
/* It's a variable. */
/* An uninitialized decl with `extern' is a reference. */
int extern_ref = !initialized && storage_class == csc_extern;
type = c_build_qualified_type (type, type_quals);
/* C99 6.2.2p7: It is invalid (compile-time undefined
behavior) to create an 'extern' declaration for a
variable if there is a global declaration that is
'static' and the global declaration is not visible.
(If the static declaration _is_ currently visible,
the 'extern' declaration is taken to refer to that decl.) */
if (extern_ref && current_scope != file_scope)
{
tree global_decl = identifier_global_value (declarator->u.id);
tree visible_decl = lookup_name (declarator->u.id);
if (global_decl
&& global_decl != visible_decl
&& TREE_CODE (global_decl) == VAR_DECL
&& !TREE_PUBLIC (global_decl))
error_at (loc, "variable previously declared %<static%> "
"redeclared %<extern%>");
}
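/* A user-level sketch of the C99 6.2.2p7 check above (hypothetical
   example):

       static int v;
       void
       f (void)
       {
         int v = 0;         // hides the file-scope 'v'
         {
           extern int v;    // error: variable previously declared
         }                  //   'static' redeclared 'extern'
       }
*/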
decl = build_decl (declarator->id_loc,
VAR_DECL, declarator->u.id, type);
if (size_varies)
C_DECL_VARIABLE_SIZE (decl) = 1;
if (declspecs->inline_p)
pedwarn (loc, 0, "variable %q+D declared %<inline%>", decl);
if (declspecs->noreturn_p)
pedwarn (loc, 0, "variable %q+D declared %<_Noreturn%>", decl);
/* At file scope, an initialized extern declaration may follow
a static declaration. In that case, DECL_EXTERNAL will be
reset later in start_decl. */
DECL_EXTERNAL (decl) = (storage_class == csc_extern);
/* At file scope, the presence of a `static' or `register' storage
class specifier, or the absence of all storage class specifiers
makes this declaration a definition (perhaps tentative). Also,
the absence of `static' makes it public. */
if (current_scope == file_scope)
{
TREE_PUBLIC (decl) = storage_class != csc_static;
TREE_STATIC (decl) = !extern_ref;
}
/* Not at file scope, only `static' makes a static definition. */
else
{
TREE_STATIC (decl) = (storage_class == csc_static);
TREE_PUBLIC (decl) = extern_ref;
}
if (threadp)
set_decl_tls_model (decl, decl_default_tls_model (decl));
}
if ((storage_class == csc_extern
|| (storage_class == csc_none
&& TREE_CODE (type) == FUNCTION_TYPE
&& !funcdef_flag))
&& variably_modified_type_p (type, NULL_TREE))
{
/* C99 6.7.5.2p2 */
if (TREE_CODE (type) == FUNCTION_TYPE)
error_at (loc, "non-nested function with variably modified type");
else
error_at (loc, "object with variably modified type must have "
"no linkage");
}
/* Record `register' declaration for warnings on &
and in case doing stupid register allocation. */
if (storage_class == csc_register)
{
C_DECL_REGISTER (decl) = 1;
DECL_REGISTER (decl) = 1;
}
/* Record constancy and volatility. */
c_apply_type_quals_to_decl (type_quals, decl);
/* Apply _Alignas specifiers. */
if (alignas_align)
{
DECL_ALIGN (decl) = alignas_align * BITS_PER_UNIT;
DECL_USER_ALIGN (decl) = 1;
}
/* If a type has volatile components, it should be stored in memory.
Otherwise, the fact that those components are volatile
will be ignored, and would even crash the compiler.
Of course, this only makes sense on VAR, PARM, and RESULT decls. */
if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl))
&& (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == PARM_DECL
|| TREE_CODE (decl) == RESULT_DECL))
{
/* It is not an error for a structure with volatile fields to
be declared register, but reset DECL_REGISTER since it
cannot actually go in a register. */
int was_reg = C_DECL_REGISTER (decl);
C_DECL_REGISTER (decl) = 0;
DECL_REGISTER (decl) = 0;
c_mark_addressable (decl);
C_DECL_REGISTER (decl) = was_reg;
}
/* This is the earliest point at which we might know the assembler
name of a variable. Thus, if it's known before this, die horribly. */
gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl));
if (warn_cxx_compat
&& TREE_CODE (decl) == VAR_DECL
&& TREE_PUBLIC (decl)
&& TREE_STATIC (decl)
&& (TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE
|| TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
&& TYPE_NAME (TREE_TYPE (decl)) == NULL_TREE)
warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat,
("non-local variable %qD with anonymous type is "
"questionable in C++"),
decl);
return decl;
}
}
/* Decode the parameter-list info for a function type or function definition.
The argument is the value returned by `get_parm_info' (or made in c-parse.c
if there is an identifier list instead of a parameter decl list).
These two functions are separate because, when a function returns
or receives functions, each is called multiple times and
the order of calls differs. The last call to `grokparms' is always the one
that contains the formal parameter names of a function definition.
Return a list of arg types to use in the FUNCTION_TYPE for this function.
FUNCDEF_FLAG is true for a function definition, false for
a mere declaration. A nonempty identifier-list gets an error message
when FUNCDEF_FLAG is false. */
static tree
grokparms (struct c_arg_info *arg_info, bool funcdef_flag)
{
tree arg_types = arg_info->types;
if (funcdef_flag && arg_info->had_vla_unspec)
{
/* A function definition isn't function prototype scope (C99 6.2.1p4). */
/* C99 6.7.5.2p4 */
error ("%<[*]%> not allowed in other than function prototype scope");
}
if (arg_types == 0 && !funcdef_flag
&& !in_system_header_at (input_location))
warning (OPT_Wstrict_prototypes,
"function declaration isn%'t a prototype");
if (arg_types == error_mark_node)
return 0; /* don't set TYPE_ARG_TYPES in this case */
else if (arg_types && TREE_CODE (TREE_VALUE (arg_types)) == IDENTIFIER_NODE)
{
if (!funcdef_flag)
{
pedwarn (input_location, 0, "parameter names (without types) in function declaration");
arg_info->parms = NULL_TREE;
}
else
arg_info->parms = arg_info->types;
arg_info->types = 0;
return 0;
}
else
{
tree parm, type, typelt;
unsigned int parmno;
const char *errmsg;
/* If there is a parameter of incomplete type in a definition,
this is an error. In a declaration this is valid, and a
struct or union type may be completed later, before any calls
or definition of the function. In the case where the tag was
first declared within the parameter list, a warning has
already been given. If a parameter has void type, the
function can never be defined or called, so
warn. */
for (parm = arg_info->parms, typelt = arg_types, parmno = 1;
parm;
parm = DECL_CHAIN (parm), typelt = TREE_CHAIN (typelt), parmno++)
{
type = TREE_VALUE (typelt);
if (type == error_mark_node)
continue;
if (!COMPLETE_TYPE_P (type))
{
if (funcdef_flag)
{
if (DECL_NAME (parm))
error_at (input_location,
"parameter %u (%q+D) has incomplete type",
parmno, parm);
else
error_at (DECL_SOURCE_LOCATION (parm),
"parameter %u has incomplete type",
parmno);
TREE_VALUE (typelt) = error_mark_node;
TREE_TYPE (parm) = error_mark_node;
arg_types = NULL_TREE;
}
else if (VOID_TYPE_P (type))
{
if (DECL_NAME (parm))
warning_at (input_location, 0,
"parameter %u (%q+D) has void type",
parmno, parm);
else
warning_at (DECL_SOURCE_LOCATION (parm), 0,
"parameter %u has void type",
parmno);
}
}
errmsg = targetm.invalid_parameter_type (type);
if (errmsg)
{
error (errmsg);
TREE_VALUE (typelt) = error_mark_node;
TREE_TYPE (parm) = error_mark_node;
arg_types = NULL_TREE;
}
if (DECL_NAME (parm) && TREE_USED (parm))
warn_if_shadowing (parm);
}
return arg_types;
}
}
/* Allocate and initialize a c_arg_info structure from the parser's
obstack. */
struct c_arg_info *
build_arg_info (void)
{
struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info);
ret->parms = NULL_TREE;
ret->tags = NULL;
ret->types = NULL_TREE;
ret->others = NULL_TREE;
ret->pending_sizes = NULL;
ret->had_vla_unspec = 0;
return ret;
}
/* Take apart the current scope and return a c_arg_info structure with
info on a parameter list just parsed.
This structure is later fed to 'grokparms' and 'store_parm_decls'.
ELLIPSIS being true means the argument list ended in '...' so don't
append a sentinel (void_list_node) to the end of the type-list.
EXPR is NULL or an expression that needs to be evaluated for the
side effects of array size expressions in the parameters. */
struct c_arg_info *
get_parm_info (bool ellipsis, tree expr)
{
struct c_binding *b = current_scope->bindings;
struct c_arg_info *arg_info = build_arg_info ();
tree parms = 0;
vec<c_arg_tag, va_gc> *tags = NULL;
tree types = 0;
tree others = 0;
static bool explained_incomplete_types = false;
bool gave_void_only_once_err = false;
arg_info->had_vla_unspec = current_scope->had_vla_unspec;
/* The bindings in this scope must not get put into a block.
We will take care of deleting the binding nodes. */
current_scope->bindings = 0;
/* This function is only called if there was *something* on the
parameter list. */
gcc_assert (b);
/* A parameter list consisting solely of 'void' indicates that the
function takes no arguments. But if the 'void' is qualified
(by 'const' or 'volatile'), or has a storage class specifier
('register'), then the behavior is undefined; issue an error.
Typedefs for 'void' are OK (see DR#157). */
if (b->prev == 0 /* one binding */
&& TREE_CODE (b->decl) == PARM_DECL /* which is a parameter */
&& !DECL_NAME (b->decl) /* anonymous */
&& VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */
{
if (TYPE_QUALS (TREE_TYPE (b->decl)) != TYPE_UNQUALIFIED
|| C_DECL_REGISTER (b->decl))
error ("%<void%> as only parameter may not be qualified");
/* There cannot be an ellipsis. */
if (ellipsis)
error ("%<void%> must be the only parameter");
arg_info->types = void_list_node;
return arg_info;
}
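/* Illustrative sketch (hypothetical code, not from this source tree) of
   what the check above accepts and rejects:

       void f (void);           OK: the function takes no arguments
       void f (const void);     error: qualified 'void'
       void f (register void);  error: storage class on 'void'
       void f (void, ...);      error: 'void' must be the only parameter  */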
if (!ellipsis)
types = void_list_node;
/* Break up the bindings list into parms, tags, types, and others;
apply sanity checks; purge the name-to-decl bindings. */
while (b)
{
tree decl = b->decl;
tree type = TREE_TYPE (decl);
c_arg_tag tag;
const char *keyword;
switch (TREE_CODE (decl))
{
case PARM_DECL:
if (b->id)
{
gcc_assert (I_SYMBOL_BINDING (b->id) == b);
I_SYMBOL_BINDING (b->id) = b->shadowed;
}
/* Check for forward decls that never got their actual decl. */
if (TREE_ASM_WRITTEN (decl))
error ("parameter %q+D has just a forward declaration", decl);
/* Check for (..., void, ...) and issue an error. */
else if (VOID_TYPE_P (type) && !DECL_NAME (decl))
{
if (!gave_void_only_once_err)
{
error ("%<void%> must be the only parameter");
gave_void_only_once_err = true;
}
}
else
{
/* Valid parameter, add it to the list. */
DECL_CHAIN (decl) = parms;
parms = decl;
/* Since there is a prototype, args are passed in their
declared types. The back end may override this later. */
DECL_ARG_TYPE (decl) = type;
types = tree_cons (0, type, types);
}
break;
case ENUMERAL_TYPE: keyword = "enum"; goto tag;
case UNION_TYPE: keyword = "union"; goto tag;
case RECORD_TYPE: keyword = "struct"; goto tag;
tag:
/* Types may not have tag-names, in which case the type
appears in the bindings list with b->id NULL. */
if (b->id)
{
gcc_assert (I_TAG_BINDING (b->id) == b);
I_TAG_BINDING (b->id) = b->shadowed;
}
/* Warn about any struct, union or enum tags defined in a
parameter list. The scope of such types is limited to
the parameter list, which is rarely if ever desirable
(it's impossible to call such a function with type-
correct arguments). An anonymous union parm type is
meaningful as a GNU extension, so don't warn for that. */
if (TREE_CODE (decl) != UNION_TYPE || b->id != 0)
{
if (b->id)
/* The %s will be one of 'struct', 'union', or 'enum'. */
warning (0, "%<%s %E%> declared inside parameter list",
keyword, b->id);
else
/* The %s will be one of 'struct', 'union', or 'enum'. */
warning (0, "anonymous %s declared inside parameter list",
keyword);
if (!explained_incomplete_types)
{
warning (0, "its scope is only this definition or declaration,"
" which is probably not what you want");
explained_incomplete_types = true;
}
}
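/* Illustrative sketch (hypothetical code, not from this source tree):

       void f (struct s { int i; } p);

   draws the warning above, plus the one-time explanation that the scope
   of 'struct s' is only this declaration.  */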
tag.id = b->id;
tag.type = decl;
vec_safe_push (tags, tag);
break;
case CONST_DECL:
case TYPE_DECL:
case FUNCTION_DECL:
/* CONST_DECLs appear here when we have an embedded enum,
and TYPE_DECLs appear here when we have an embedded struct
or union. No warnings for this - we already warned about the
type itself. FUNCTION_DECLs appear when there is an implicit
function declaration in the parameter list. */
/* When we reinsert this decl in the function body, we need
to reconstruct whether it was marked as nested. */
gcc_assert (TREE_CODE (decl) == FUNCTION_DECL
? b->nested
: !b->nested);
DECL_CHAIN (decl) = others;
others = decl;
/* fall through */
case ERROR_MARK:
/* error_mark_node appears here when we have an undeclared
variable. Just throw it away. */
if (b->id)
{
gcc_assert (I_SYMBOL_BINDING (b->id) == b);
I_SYMBOL_BINDING (b->id) = b->shadowed;
}
break;
/* Other things that might be encountered. */
case LABEL_DECL:
case VAR_DECL:
default:
gcc_unreachable ();
}
b = free_binding_and_advance (b);
}
arg_info->parms = parms;
arg_info->tags = tags;
arg_info->types = types;
arg_info->others = others;
arg_info->pending_sizes = expr;
return arg_info;
}
/* Get the struct, enum or union (CODE says which) with tag NAME.
Define the tag as a forward-reference with location LOC if it is
not defined. Return a c_typespec structure for the type
specifier. */
struct c_typespec
parser_xref_tag (location_t loc, enum tree_code code, tree name)
{
struct c_typespec ret;
tree ref;
location_t refloc;
ret.expr = NULL_TREE;
ret.expr_const_operands = true;
/* If a cross reference is requested, look up the type
already defined for this tag and return it. */
ref = lookup_tag (code, name, 0, &refloc);
/* If this is the right type of tag, return what we found.
(This reference will be shadowed by shadow_tag later if appropriate.)
If this is the wrong type of tag, do not return it. If it was the
wrong type in the same scope, we will have had an error
message already; if in a different scope and declaring
a name, pending_xref_error will give an error message; but if in a
different scope and not declaring a name, this tag should
shadow the previous declaration of a different type of tag, and
this would not work properly if we return the reference found.
(For example, with "struct foo" in an outer scope, "union foo;"
must shadow that tag with a new one of union type.) */
ret.kind = (ref ? ctsk_tagref : ctsk_tagfirstref);
if (ref && TREE_CODE (ref) == code)
{
if (C_TYPE_DEFINED_IN_STRUCT (ref)
&& loc != UNKNOWN_LOCATION
&& warn_cxx_compat)
{
switch (code)
{
case ENUMERAL_TYPE:
warning_at (loc, OPT_Wc___compat,
("enum type defined in struct or union "
"is not visible in C++"));
inform (refloc, "enum type defined here");
break;
case RECORD_TYPE:
warning_at (loc, OPT_Wc___compat,
("struct defined in struct or union "
"is not visible in C++"));
inform (refloc, "struct defined here");
break;
case UNION_TYPE:
warning_at (loc, OPT_Wc___compat,
("union defined in struct or union "
"is not visible in C++"));
inform (refloc, "union defined here");
break;
default:
gcc_unreachable ();
}
}
ret.spec = ref;
return ret;
}
/* If no such tag is yet defined, create a forward-reference node
and record it as the "definition".
When a real declaration of this type is found,
the forward-reference will be altered into a real type. */
ref = make_node (code);
if (code == ENUMERAL_TYPE)
{
/* Give the type a default layout like unsigned int
to avoid crashing if it does not get defined. */
SET_TYPE_MODE (ref, TYPE_MODE (unsigned_type_node));
TYPE_ALIGN (ref) = TYPE_ALIGN (unsigned_type_node);
TYPE_USER_ALIGN (ref) = 0;
TYPE_UNSIGNED (ref) = 1;
TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node);
TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node);
TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node);
}
pushtag (loc, name, ref);
ret.spec = ref;
return ret;
}
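/* Illustrative sketch (hypothetical code, not from this source tree) of
   the forward-reference behavior above:

       struct s *p;            first use: a forward-reference node is made
       struct s { int i; };    the later definition completes that node,
                               so 'p' already has the right type  */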
/* Get the struct, enum or union (CODE says which) with tag NAME.
Define the tag as a forward-reference if it is not defined.
Return a tree for the type. */
tree
xref_tag (enum tree_code code, tree name)
{
return parser_xref_tag (input_location, code, name).spec;
}
/* Make sure that the tag NAME is defined *in the current scope*
at least as a forward reference.
LOC is the location of the struct's definition.
CODE says which kind of tag NAME ought to be.
This stores the current value of the file static STRUCT_PARSE_INFO
in *ENCLOSING_STRUCT_PARSE_INFO, and points STRUCT_PARSE_INFO at a
new c_struct_parse_info structure. The old value of
STRUCT_PARSE_INFO is restored in finish_struct. */
tree
start_struct (location_t loc, enum tree_code code, tree name,
struct c_struct_parse_info **enclosing_struct_parse_info)
{
/* If there is already a tag defined at this scope
(as a forward reference), just return it. */
tree ref = NULL_TREE;
location_t refloc = UNKNOWN_LOCATION;
if (name != NULL_TREE)
ref = lookup_tag (code, name, 1, &refloc);
if (ref && TREE_CODE (ref) == code)
{
if (TYPE_SIZE (ref))
{
if (code == UNION_TYPE)
error_at (loc, "redefinition of %<union %E%>", name);
else
error_at (loc, "redefinition of %<struct %E%>", name);
if (refloc != UNKNOWN_LOCATION)
inform (refloc, "originally defined here");
/* Don't create structures using a name already in use. */
ref = NULL_TREE;
}
else if (C_TYPE_BEING_DEFINED (ref))
{
if (code == UNION_TYPE)
error_at (loc, "nested redefinition of %<union %E%>", name);
else
error_at (loc, "nested redefinition of %<struct %E%>", name);
/* Don't bother to report "originally defined here" for a
nested redefinition; the original definition should be
obvious. */
/* Don't create structures that contain themselves. */
ref = NULL_TREE;
}
}
/* Otherwise create a forward-reference just so the tag is in scope. */
if (ref == NULL_TREE || TREE_CODE (ref) != code)
{
ref = make_node (code);
pushtag (loc, name, ref);
}
C_TYPE_BEING_DEFINED (ref) = 1;
TYPE_PACKED (ref) = flag_pack_struct;
*enclosing_struct_parse_info = struct_parse_info;
struct_parse_info = XNEW (struct c_struct_parse_info);
struct_parse_info->struct_types.create (0);
struct_parse_info->fields.create (0);
struct_parse_info->typedefs_seen.create (0);
/* FIXME: This will issue a warning for a use of a type defined
within a statement expr used within sizeof, et al. This is not
terribly serious as C++ doesn't permit statement exprs within
sizeof anyhow. */
if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
warning_at (loc, OPT_Wc___compat,
"defining type in %qs expression is invalid in C++",
(in_sizeof
? "sizeof"
: (in_typeof ? "typeof" : "alignof")));
return ref;
}
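/* Illustrative sketch (hypothetical code, not from this source tree) of
   the two redefinition diagnostics above:

       struct s { int i; };
       struct s { int j; };                  error: redefinition of 'struct s'
       struct t { struct t { int i; } m; };  error: nested redefinition  */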
/* Process the specs, declarator and width (NULL if omitted)
of a structure component, returning a FIELD_DECL node.
WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node.
DECL_ATTRS is as for grokdeclarator.
LOC is the location of the structure component.
This is done during the parsing of the struct declaration.
The FIELD_DECL nodes are chained together and the lot of them
are ultimately passed to `finish_struct' to make the RECORD_TYPE node. */
tree
grokfield (location_t loc,
struct c_declarator *declarator, struct c_declspecs *declspecs,
tree width, tree *decl_attrs)
{
tree value;
if (declarator->kind == cdk_id && declarator->u.id == NULL_TREE
&& width == NULL_TREE)
{
/* This is an unnamed decl.
If we have something of the form "union { list } ;" then this
is the anonymous union extension. Similarly for struct.
If this is something of the form "struct foo;", then
If MS or Plan 9 extensions are enabled, this is handled as
an anonymous struct.
Otherwise this is a forward declaration of a structure tag.
If this is something of the form "foo;" and foo is a TYPE_DECL, then
If foo names a structure or union without a tag, then this
is an anonymous struct (this is permitted by C11).
If MS or Plan 9 extensions are enabled and foo names a
structure, then again this is an anonymous struct.
Otherwise this is an error.
Oh what a horrid tangled web we weave. I wonder if MS consciously
took this from Plan 9 or if it was an accident of implementation
that took root before someone noticed the bug... */
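/* Illustrative sketch (hypothetical code, not from this source tree) of
   the cases enumerated above, assuming 'footype' is a typedef for an
   untagged struct:

       struct outer {
         union { int i; float f; };  anonymous union (C11)
         struct foo;                 forward tag declaration, or an
                                     anonymous struct with the MS or
                                     Plan 9 extensions
         footype;                    anonymous struct (C11)
       };  */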
tree type = declspecs->type;
bool type_ok = (TREE_CODE (type) == RECORD_TYPE
|| TREE_CODE (type) == UNION_TYPE);
bool ok = false;
if (type_ok
&& (flag_ms_extensions
|| flag_plan9_extensions
|| !declspecs->typedef_p))
{
if (flag_ms_extensions || flag_plan9_extensions)
ok = true;
else if (TYPE_NAME (type) == NULL)
ok = true;
else
ok = false;
}
if (!ok)
{
pedwarn (loc, 0, "declaration does not declare anything");
return NULL_TREE;
}
if (flag_isoc99)
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C99 doesn%'t support unnamed structs/unions");
else
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C90 doesn%'t support unnamed structs/unions");
}
value = grokdeclarator (declarator, declspecs, FIELD, false,
width ? &width : NULL, decl_attrs, NULL, NULL,
DEPRECATED_NORMAL);
finish_decl (value, loc, NULL_TREE, NULL_TREE, NULL_TREE);
DECL_INITIAL (value) = width;
if (warn_cxx_compat && DECL_NAME (value) != NULL_TREE)
{
/* If we currently have a binding for this field, set the
in_struct field in the binding, so that we warn about lookups
which find it. */
struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (value));
if (b != NULL)
{
/* If the in_struct field is not yet set, push it on a list
to be cleared when this struct is finished. */
if (!b->in_struct)
{
struct_parse_info->fields.safe_push (b);
b->in_struct = 1;
}
}
}
return value;
}
/* Subroutine of detect_field_duplicates: return whether X and Y,
which are both fields in the same struct, have duplicate field
names. */
static bool
is_duplicate_field (tree x, tree y)
{
if (DECL_NAME (x) != NULL_TREE && DECL_NAME (x) == DECL_NAME (y))
return true;
/* When using -fplan9-extensions, an anonymous field whose name is a
typedef can duplicate a field name. */
if (flag_plan9_extensions
&& (DECL_NAME (x) == NULL_TREE || DECL_NAME (y) == NULL_TREE))
{
tree xt, xn, yt, yn;
xt = TREE_TYPE (x);
if (DECL_NAME (x) != NULL_TREE)
xn = DECL_NAME (x);
else if ((TREE_CODE (xt) == RECORD_TYPE || TREE_CODE (xt) == UNION_TYPE)
&& TYPE_NAME (xt) != NULL_TREE
&& TREE_CODE (TYPE_NAME (xt)) == TYPE_DECL)
xn = DECL_NAME (TYPE_NAME (xt));
else
xn = NULL_TREE;
yt = TREE_TYPE (y);
if (DECL_NAME (y) != NULL_TREE)
yn = DECL_NAME (y);
else if ((TREE_CODE (yt) == RECORD_TYPE || TREE_CODE (yt) == UNION_TYPE)
&& TYPE_NAME (yt) != NULL_TREE
&& TREE_CODE (TYPE_NAME (yt)) == TYPE_DECL)
yn = DECL_NAME (TYPE_NAME (yt));
else
yn = NULL_TREE;
if (xn != NULL_TREE && xn == yn)
return true;
}
return false;
}
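/* Illustrative sketch (hypothetical code, not from this source tree):
   with -fplan9-extensions the test above reports a duplicate for

       typedef struct { int x; } T;
       struct s { T; int T; };

   because both members answer to the name 'T'.  */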
/* Subroutine of detect_field_duplicates: add the fields of FIELDLIST
to HTAB, giving errors for any duplicates. */
static void
detect_field_duplicates_hash (tree fieldlist,
hash_table<pointer_hash <tree_node> > *htab)
{
tree x, y;
tree_node **slot;
for (x = fieldlist; x ; x = DECL_CHAIN (x))
if ((y = DECL_NAME (x)) != 0)
{
slot = htab->find_slot (y, INSERT);
if (*slot)
{
error ("duplicate member %q+D", x);
DECL_NAME (x) = NULL_TREE;
}
*slot = y;
}
else if (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (x)) == UNION_TYPE)
{
detect_field_duplicates_hash (TYPE_FIELDS (TREE_TYPE (x)), htab);
/* When using -fplan9-extensions, an anonymous field whose
name is a typedef can duplicate a field name. */
if (flag_plan9_extensions
&& TYPE_NAME (TREE_TYPE (x)) != NULL_TREE
&& TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL)
{
tree xn = DECL_NAME (TYPE_NAME (TREE_TYPE (x)));
slot = htab->find_slot (xn, INSERT);
if (*slot)
error ("duplicate member %q+D", TYPE_NAME (TREE_TYPE (x)));
*slot = xn;
}
}
}
/* Generate an error for any duplicate field names in FIELDLIST. Munge
the list such that this does not present a problem later. */
static void
detect_field_duplicates (tree fieldlist)
{
tree x, y;
int timeout = 10;
/* If the struct is the list of instance variables of an Objective-C
class, then we need to check all the instance variables of
superclasses when checking for duplicates (since you can't have
an instance variable in a subclass with the same name as an
instance variable in a superclass). We pass on this job to the
Objective-C compiler. objc_detect_field_duplicates() will return
false if we are not checking the list of instance variables and
the C frontend should proceed with the standard field duplicate
checks. If we are checking the list of instance variables, the
ObjC frontend will do the check, emit the errors if needed, and
then return true. */
if (c_dialect_objc ())
if (objc_detect_field_duplicates (false))
return;
/* First, see if there are more than "a few" fields. With zero or
one fields there can be no duplicates, so return immediately. */
if (!fieldlist || !DECL_CHAIN (fieldlist))
return;
x = fieldlist;
do {
timeout--;
if (DECL_NAME (x) == NULL_TREE
&& (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (x)) == UNION_TYPE))
timeout = 0;
x = DECL_CHAIN (x);
} while (timeout > 0 && x);
/* If there were "few" fields and no anonymous structures or unions,
avoid the overhead of allocating a hash table. Instead just do
the nested traversal thing. */
if (timeout > 0)
{
for (x = DECL_CHAIN (fieldlist); x; x = DECL_CHAIN (x))
/* When using -fplan9-extensions, we can have duplicates
between typedef names and fields. */
if (DECL_NAME (x)
|| (flag_plan9_extensions
&& DECL_NAME (x) == NULL_TREE
&& (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (x)) == UNION_TYPE)
&& TYPE_NAME (TREE_TYPE (x)) != NULL_TREE
&& TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL))
{
for (y = fieldlist; y != x; y = TREE_CHAIN (y))
if (is_duplicate_field (y, x))
{
error ("duplicate member %q+D", x);
DECL_NAME (x) = NULL_TREE;
}
}
}
else
{
hash_table<pointer_hash <tree_node> > htab (37);
detect_field_duplicates_hash (fieldlist, &htab);
}
}
/* Finish up struct info used by -Wc++-compat. */
static void
warn_cxx_compat_finish_struct (tree fieldlist)
{
unsigned int ix;
tree x;
struct c_binding *b;
/* Set the C_TYPE_DEFINED_IN_STRUCT flag for each type defined in
the current struct. We do this now at the end of the struct
because the flag is used to issue visibility warnings, and we
only want to issue those warnings if the type is referenced
outside of the struct declaration. */
FOR_EACH_VEC_ELT (struct_parse_info->struct_types, ix, x)
C_TYPE_DEFINED_IN_STRUCT (x) = 1;
/* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of
typedefs used when declaring fields in this struct. If the name
of any of the fields is also a typedef name then the struct would
not parse in C++, because the C++ lookup rules say that the
typedef name would be looked up in the context of the struct, and
would thus be the field rather than the typedef. */
if (!struct_parse_info->typedefs_seen.is_empty ()
&& fieldlist != NULL_TREE)
{
/* Use a hash_set<tree> using the name of the typedef. We can use
a hash_set<tree> because identifiers are interned. */
hash_set<tree> tset;
FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, ix, x)
tset.add (DECL_NAME (x));
for (x = fieldlist; x != NULL_TREE; x = DECL_CHAIN (x))
{
if (DECL_NAME (x) != NULL_TREE
&& tset.contains (DECL_NAME (x)))
{
warning_at (DECL_SOURCE_LOCATION (x), OPT_Wc___compat,
("using %qD as both field and typedef name is "
"invalid in C++"),
x);
/* FIXME: It would be nice to report the location where
the typedef name is used. */
}
}
}
/* For each field which has a binding and which was not defined in
an enclosing struct, clear the in_struct field. */
FOR_EACH_VEC_ELT (struct_parse_info->fields, ix, b)
b->in_struct = 0;
}
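/* Illustrative sketch (hypothetical code, not from this source tree) of
   the field/typedef clash diagnosed above; this is valid C but rejected
   by C++ lookup rules:

       typedef int t;
       struct s { t t; };    -Wc++-compat warns about the field 't'  */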
/* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T.
LOC is the location of the RECORD_TYPE or UNION_TYPE's definition.
FIELDLIST is a chain of FIELD_DECL nodes for the fields.
ATTRIBUTES are attributes to be applied to the structure.
ENCLOSING_STRUCT_PARSE_INFO is the value of STRUCT_PARSE_INFO when
the struct was started. */
tree
finish_struct (location_t loc, tree t, tree fieldlist, tree attributes,
struct c_struct_parse_info *enclosing_struct_parse_info)
{
tree x;
bool toplevel = file_scope == current_scope;
int saw_named_field;
/* If this type was previously laid out as a forward reference,
make sure we lay it out again. */
TYPE_SIZE (t) = 0;
decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
if (pedantic)
{
for (x = fieldlist; x; x = DECL_CHAIN (x))
{
if (DECL_NAME (x) != 0)
break;
if (flag_isoc11
&& (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (x)) == UNION_TYPE))
break;
}
if (x == 0)
{
if (TREE_CODE (t) == UNION_TYPE)
{
if (fieldlist)
pedwarn (loc, OPT_Wpedantic, "union has no named members");
else
pedwarn (loc, OPT_Wpedantic, "union has no members");
}
else
{
if (fieldlist)
pedwarn (loc, OPT_Wpedantic, "struct has no named members");
else
pedwarn (loc, OPT_Wpedantic, "struct has no members");
}
}
}
/* Install struct as DECL_CONTEXT of each field decl.
Also process specified field sizes, found in the DECL_INITIAL,
storing 0 there after the type has been changed to precision equal
to its width, rather than the precision of the specified standard
type. (Correct layout requires the original type to have been preserved
until now.) */
saw_named_field = 0;
for (x = fieldlist; x; x = DECL_CHAIN (x))
{
if (TREE_TYPE (x) == error_mark_node)
continue;
DECL_CONTEXT (x) = t;
/* If any field is const, the structure type is pseudo-const. */
if (TREE_READONLY (x))
C_TYPE_FIELDS_READONLY (t) = 1;
else
{
/* A field that is pseudo-const makes the structure likewise. */
tree t1 = strip_array_types (TREE_TYPE (x));
if ((TREE_CODE (t1) == RECORD_TYPE || TREE_CODE (t1) == UNION_TYPE)
&& C_TYPE_FIELDS_READONLY (t1))
C_TYPE_FIELDS_READONLY (t) = 1;
}
/* Any field that is volatile means variables of this type must be
treated in some ways as volatile. */
if (TREE_THIS_VOLATILE (x))
C_TYPE_FIELDS_VOLATILE (t) = 1;
/* Any field of nominal variable size implies structure is too. */
if (C_DECL_VARIABLE_SIZE (x))
C_TYPE_VARIABLE_SIZE (t) = 1;
if (DECL_INITIAL (x))
{
unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x));
DECL_SIZE (x) = bitsize_int (width);
DECL_BIT_FIELD (x) = 1;
SET_DECL_C_BIT_FIELD (x);
}
if (TYPE_PACKED (t)
&& (DECL_BIT_FIELD (x)
|| TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT))
DECL_PACKED (x) = 1;
/* Detect flexible array member in an invalid context. */
if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
&& TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE
&& TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE
&& TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE)
{
if (TREE_CODE (t) == UNION_TYPE)
{
error_at (DECL_SOURCE_LOCATION (x),
"flexible array member in union");
TREE_TYPE (x) = error_mark_node;
}
else if (DECL_CHAIN (x) != NULL_TREE)
{
error_at (DECL_SOURCE_LOCATION (x),
"flexible array member not at end of struct");
TREE_TYPE (x) = error_mark_node;
}
else if (!saw_named_field)
{
error_at (DECL_SOURCE_LOCATION (x),
"flexible array member in otherwise empty struct");
TREE_TYPE (x) = error_mark_node;
}
}
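/* Illustrative sketch (hypothetical code, not from this source tree) of
   the three invalid contexts rejected above:

       struct ok  { int n; char d[]; };   OK: at end, not otherwise empty
       union  u   { char d[]; };          error: in union
       struct mid { char d[]; int n; };   error: not at end of struct
       struct emp { char d[]; };          error: otherwise empty struct  */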
if (pedantic && TREE_CODE (t) == RECORD_TYPE
&& flexible_array_type_p (TREE_TYPE (x)))
pedwarn (DECL_SOURCE_LOCATION (x), OPT_Wpedantic,
"invalid use of structure with flexible array member");
if (DECL_NAME (x)
|| TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (x)) == UNION_TYPE)
saw_named_field = 1;
}
detect_field_duplicates (fieldlist);
/* Now we have the nearly final fieldlist. Record it,
then lay out the structure or union (including the fields). */
TYPE_FIELDS (t) = fieldlist;
layout_type (t);
if (TYPE_SIZE_UNIT (t)
&& TREE_CODE (TYPE_SIZE_UNIT (t)) == INTEGER_CST
&& !TREE_OVERFLOW (TYPE_SIZE_UNIT (t))
&& !valid_constant_size_p (TYPE_SIZE_UNIT (t)))
error ("type %qT is too large", t);
/* Give bit-fields their proper types. */
{
tree *fieldlistp = &fieldlist;
while (*fieldlistp)
if (TREE_CODE (*fieldlistp) == FIELD_DECL && DECL_INITIAL (*fieldlistp)
&& TREE_TYPE (*fieldlistp) != error_mark_node)
{
unsigned HOST_WIDE_INT width
= tree_to_uhwi (DECL_INITIAL (*fieldlistp));
tree type = TREE_TYPE (*fieldlistp);
if (width != TYPE_PRECISION (type))
{
TREE_TYPE (*fieldlistp)
= c_build_bitfield_integer_type (width, TYPE_UNSIGNED (type));
DECL_MODE (*fieldlistp) = TYPE_MODE (TREE_TYPE (*fieldlistp));
}
DECL_INITIAL (*fieldlistp) = 0;
}
else
fieldlistp = &DECL_CHAIN (*fieldlistp);
}
/* Now we have the truly final field list.
Store it in this type and in the variants. */
TYPE_FIELDS (t) = fieldlist;
/* If there are lots of fields, sort so we can look through them fast.
We arbitrarily consider 16 or more elts to be "a lot". */
{
int len = 0;
for (x = fieldlist; x; x = DECL_CHAIN (x))
{
if (len > 15 || DECL_NAME (x) == NULL)
break;
len += 1;
}
if (len > 15)
{
tree *field_array;
struct lang_type *space;
struct sorted_fields_type *space2;
len += list_length (x);
/* Use the same allocation policy here that make_node uses, to
ensure that this lives as long as the rest of the struct decl.
All decls in an inline function need to be saved. */
space = ggc_cleared_alloc<struct lang_type> ();
space2 = (sorted_fields_type *) ggc_internal_alloc
(sizeof (struct sorted_fields_type) + len * sizeof (tree));
len = 0;
space->s = space2;
field_array = &space2->elts[0];
for (x = fieldlist; x; x = DECL_CHAIN (x))
{
field_array[len++] = x;
/* If there is an anonymous struct or union, break out of the loop. */
if (DECL_NAME (x) == NULL)
break;
}
/* Found no anonymous struct/union. Add the TYPE_LANG_SPECIFIC. */
if (x == NULL)
{
TYPE_LANG_SPECIFIC (t) = space;
TYPE_LANG_SPECIFIC (t)->s->len = len;
field_array = TYPE_LANG_SPECIFIC (t)->s->elts;
qsort (field_array, len, sizeof (tree), field_decl_cmp);
}
}
}
for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x))
{
TYPE_FIELDS (x) = TYPE_FIELDS (t);
TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t);
C_TYPE_FIELDS_READONLY (x) = C_TYPE_FIELDS_READONLY (t);
C_TYPE_FIELDS_VOLATILE (x) = C_TYPE_FIELDS_VOLATILE (t);
C_TYPE_VARIABLE_SIZE (x) = C_TYPE_VARIABLE_SIZE (t);
}
/* If this was supposed to be a transparent union, but we can't
make it one, warn and turn off the flag. */
if (TREE_CODE (t) == UNION_TYPE
&& TYPE_TRANSPARENT_AGGR (t)
&& (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t))))
{
TYPE_TRANSPARENT_AGGR (t) = 0;
warning_at (loc, 0, "union cannot be made transparent");
}
/* If this structure or union completes the type of any previous
variable declaration, lay it out and output its rtl. */
for (x = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t));
x;
x = TREE_CHAIN (x))
{
tree decl = TREE_VALUE (x);
if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
layout_array_type (TREE_TYPE (decl));
if (TREE_CODE (decl) != TYPE_DECL)
{
layout_decl (decl, 0);
if (c_dialect_objc ())
objc_check_decl (decl);
rest_of_decl_compilation (decl, toplevel, 0);
}
}
C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)) = 0;
/* Update type location to the one of the definition, instead of e.g.
a forward declaration. */
if (TYPE_STUB_DECL (t))
DECL_SOURCE_LOCATION (TYPE_STUB_DECL (t)) = loc;
/* Finish debugging output for this type. */
rest_of_type_compilation (t, toplevel);
/* If we're inside a function proper, i.e. not file-scope and not still
parsing parameters, then arrange for the size of a variable sized type
to be bound now. */
if (building_stmt_list_p () && variably_modified_type_p (t, NULL_TREE))
add_stmt (build_stmt (loc,
DECL_EXPR, build_decl (loc, TYPE_DECL, NULL, t)));
if (warn_cxx_compat)
warn_cxx_compat_finish_struct (fieldlist);
struct_parse_info->struct_types.release ();
struct_parse_info->fields.release ();
struct_parse_info->typedefs_seen.release ();
XDELETE (struct_parse_info);
struct_parse_info = enclosing_struct_parse_info;
/* If this struct is defined inside a struct, add it to
struct_types. */
if (warn_cxx_compat
&& struct_parse_info != NULL
&& !in_sizeof && !in_typeof && !in_alignof)
struct_parse_info->struct_types.safe_push (t);
return t;
}
/* Lay out the type T, and its element type, and so on. */
static void
layout_array_type (tree t)
{
if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
layout_array_type (TREE_TYPE (t));
layout_type (t);
}
/* Begin compiling the definition of an enumeration type.
NAME is its name (or null if anonymous).
LOC is the enum's location.
Returns the type object, as yet incomplete.
Also records info about it so that build_enumerator
may be used to declare the individual values as they are read. */
tree
start_enum (location_t loc, struct c_enum_contents *the_enum, tree name)
{
tree enumtype = NULL_TREE;
location_t enumloc = UNKNOWN_LOCATION;
/* If this is the real definition for a previous forward reference,
fill in the contents in the same object that used to be the
forward reference. */
if (name != NULL_TREE)
enumtype = lookup_tag (ENUMERAL_TYPE, name, 1, &enumloc);
if (enumtype == 0 || TREE_CODE (enumtype) != ENUMERAL_TYPE)
{
enumtype = make_node (ENUMERAL_TYPE);
pushtag (loc, name, enumtype);
}
if (C_TYPE_BEING_DEFINED (enumtype))
error_at (loc, "nested redefinition of %<enum %E%>", name);
C_TYPE_BEING_DEFINED (enumtype) = 1;
if (TYPE_VALUES (enumtype) != 0)
{
/* This enum is a named one that has been declared already. */
error_at (loc, "redeclaration of %<enum %E%>", name);
if (enumloc != UNKNOWN_LOCATION)
inform (enumloc, "originally defined here");
/* Completely replace its old definition.
The old enumerators remain defined, however. */
TYPE_VALUES (enumtype) = 0;
}
the_enum->enum_next_value = integer_zero_node;
the_enum->enum_overflow = 0;
if (flag_short_enums)
TYPE_PACKED (enumtype) = 1;
/* FIXME: This will issue a warning for a use of a type defined
within sizeof in a statement expr. This is not terribly serious
as C++ doesn't permit statement exprs within sizeof anyhow. */
if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof))
warning_at (loc, OPT_Wc___compat,
"defining type in %qs expression is invalid in C++",
(in_sizeof
? "sizeof"
: (in_typeof ? "typeof" : "alignof")));
return enumtype;
}
/* After processing and defining all the values of an enumeration type,
install their decls in the enumeration type and finish it off.
ENUMTYPE is the type object, VALUES a list of decl-value pairs,
and ATTRIBUTES are the specified attributes.
Returns ENUMTYPE. */
tree
finish_enum (tree enumtype, tree values, tree attributes)
{
tree pair, tem;
tree minnode = 0, maxnode = 0;
int precision;
signop sign;
bool toplevel = (file_scope == current_scope);
struct lang_type *lt;
decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
/* Calculate the maximum value of any enumerator in this type. */
if (values == error_mark_node)
minnode = maxnode = integer_zero_node;
else
{
minnode = maxnode = TREE_VALUE (values);
for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair))
{
tree value = TREE_VALUE (pair);
if (tree_int_cst_lt (maxnode, value))
maxnode = value;
if (tree_int_cst_lt (value, minnode))
minnode = value;
}
}
/* Construct the final type of this enumeration. It is the same
as one of the integral types - the narrowest one that fits, except
that normally we only go as narrow as int - and signed iff any of
the values are negative. */
sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED;
precision = MAX (tree_int_cst_min_precision (minnode, sign),
tree_int_cst_min_precision (maxnode, sign));
if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node))
{
tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0);
if (tem == NULL)
{
warning (0, "enumeration values exceed range of largest integer");
tem = long_long_integer_type_node;
}
}
else
tem = sign == UNSIGNED ? unsigned_type_node : integer_type_node;
TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem);
TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem);
TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem);
TYPE_SIZE (enumtype) = 0;
/* If the precision of the type was specified with an attribute and it
was too small, give an error. Otherwise, use it. */
if (TYPE_PRECISION (enumtype))
{
if (precision > TYPE_PRECISION (enumtype))
error ("specified mode too small for enumeral values");
}
else
TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem);
layout_type (enumtype);
if (values != error_mark_node)
{
/* Change the type of the enumerators to be the enum type. We
need to do this irrespective of the size of the enum, for
proper type checking. Replace the DECL_INITIALs of the
enumerators, and the value slots of the list, with copies
that have the enum type; they cannot be modified in place
because they may be shared (e.g. integer_zero_node). Finally,
change the purpose slots to point to the names of the decls. */
for (pair = values; pair; pair = TREE_CHAIN (pair))
{
tree enu = TREE_PURPOSE (pair);
tree ini = DECL_INITIAL (enu);
TREE_TYPE (enu) = enumtype;
/* The ISO C Standard mandates enumerators to have type int,
even though the underlying type of an enum type is
unspecified. However, GCC allows enumerators of any
integer type as an extension. build_enumerator()
converts any enumerators that fit in an int to type int,
to avoid promotions to unsigned types when comparing
integers with enumerators that fit in the int range.
When -pedantic is given, build_enumerator() would have
already warned about those that don't fit. Here we
convert the rest to the enumerator type. */
if (TREE_TYPE (ini) != integer_type_node)
ini = convert (enumtype, ini);
DECL_INITIAL (enu) = ini;
TREE_PURPOSE (pair) = DECL_NAME (enu);
TREE_VALUE (pair) = ini;
}
TYPE_VALUES (enumtype) = values;
}
/* Record the min/max values so that we can warn about bit-field
enumerations that are too small for the values. */
lt = ggc_cleared_alloc<struct lang_type> ();
lt->enum_min = minnode;
lt->enum_max = maxnode;
TYPE_LANG_SPECIFIC (enumtype) = lt;
/* Fix up all variant types of this enum type. */
for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem))
{
if (tem == enumtype)
continue;
TYPE_VALUES (tem) = TYPE_VALUES (enumtype);
TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype);
TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype);
TYPE_SIZE (tem) = TYPE_SIZE (enumtype);
TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype);
SET_TYPE_MODE (tem, TYPE_MODE (enumtype));
TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype);
TYPE_ALIGN (tem) = TYPE_ALIGN (enumtype);
TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype);
TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype);
TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype);
}
/* Finish debugging output for this type. */
rest_of_type_compilation (enumtype, toplevel);
/* If this enum is defined inside a struct, add it to
struct_types. */
if (warn_cxx_compat
&& struct_parse_info != NULL
&& !in_sizeof && !in_typeof && !in_alignof)
struct_parse_info->struct_types.safe_push (enumtype);
return enumtype;
}
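/* Illustrative sketch (hypothetical code, not from this source tree):
   given

       enum e { A = 0, B = 200 };

   the minimum precision is 8 bits, but the code above still uses
   unsigned int unless the enum is packed (-fshort-enums or a packed
   attribute), in which case c_common_type_for_size picks a narrower
   type.  */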
/* Build and install a CONST_DECL for one value of the
current enumeration type (one that was begun with start_enum).
DECL_LOC is the location of the enumerator.
LOC is the location of the '=' operator if any, DECL_LOC otherwise.
Return a tree-list containing the CONST_DECL and its value.
Assignment of sequential values by default is handled here. */
tree
build_enumerator (location_t decl_loc, location_t loc,
struct c_enum_contents *the_enum, tree name, tree value)
{
tree decl, type;
/* Validate and default VALUE. */
if (value != 0)
{
/* Don't issue more errors for error_mark_node (i.e. an
undeclared identifier) - just ignore the value expression. */
if (value == error_mark_node)
value = 0;
else if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
{
error_at (loc, "enumerator value for %qE is not an integer constant",
name);
value = 0;
}
else
{
if (TREE_CODE (value) != INTEGER_CST)
{
value = c_fully_fold (value, false, NULL);
if (TREE_CODE (value) == INTEGER_CST)
pedwarn (loc, OPT_Wpedantic,
"enumerator value for %qE is not an integer "
"constant expression", name);
}
if (TREE_CODE (value) != INTEGER_CST)
{
error ("enumerator value for %qE is not an integer constant",
name);
value = 0;
}
else
{
value = default_conversion (value);
constant_expression_warning (value);
}
}
}
/* Default based on previous value. */
/* It should no longer be possible to have NON_LVALUE_EXPR
in the default. */
if (value == 0)
{
value = the_enum->enum_next_value;
if (the_enum->enum_overflow)
error_at (loc, "overflow in enumeration values");
}
/* Even though the underlying type of an enum is unspecified, the
type of enumeration constants is explicitly defined as int
(6.4.4.3/2 in the C99 Standard). GCC allows any integer type as
an extension. */
else if (!int_fits_type_p (value, integer_type_node))
pedwarn (loc, OPT_Wpedantic,
"ISO C restricts enumerator values to range of %<int%>");
/* The ISO C Standard mandates enumerators to have type int, even
though the underlying type of an enum type is unspecified.
However, GCC allows enumerators of any integer type as an
extension. Here we convert any enumerators that fit in an int
to type int, to avoid promotions to unsigned types when comparing
integers with enumerators that fit in the int range. When
-pedantic is given, we would have already warned about those that
don't fit. We have to do this here rather than in finish_enum
because this value may be used to define more enumerators. */
if (int_fits_type_p (value, integer_type_node))
value = convert (integer_type_node, value);
/* Set basis for default for next value. */
the_enum->enum_next_value
= build_binary_op (EXPR_LOC_OR_LOC (value, input_location),
PLUS_EXPR, value, integer_one_node, 0);
the_enum->enum_overflow = tree_int_cst_lt (the_enum->enum_next_value, value);
/* Now create a declaration for the enum value name. */
type = TREE_TYPE (value);
type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
TYPE_PRECISION (integer_type_node)),
(TYPE_PRECISION (type)
>= TYPE_PRECISION (integer_type_node)
&& TYPE_UNSIGNED (type)));
decl = build_decl (decl_loc, CONST_DECL, name, type);
DECL_INITIAL (decl) = convert (type, value);
pushdecl (decl);
return tree_cons (decl, value, NULL_TREE);
}
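/* Illustrative sketch (hypothetical code, not from this source tree) of
   the defaulting and range rules above:

       enum e { X = 5, Y, Z };       Y == 6, Z == 7 (sequential default)
       enum f { BIG = 1UL << 40 };   pedwarn on targets where the value
                                     does not fit in 'int'  */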
/* Create the FUNCTION_DECL for a function definition.
DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of
the declaration; they describe the function's name and the type it returns,
but twisted together in a fashion that parallels the syntax of C.
This function creates a binding context for the function body
as well as setting up the FUNCTION_DECL in current_function_decl.
Returns 1 on success. If the DECLARATOR is not suitable for a function
(it defines a datum instead), we return 0, which tells
yyparse to report a parse error. */
int
start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
tree attributes)
{
tree decl1, old_decl;
tree restype, resdecl;
location_t loc;
current_function_returns_value = 0; /* Assume, until we see it does. */
current_function_returns_null = 0;
current_function_returns_abnormally = 0;
warn_about_return_type = 0;
c_switch_stack = NULL;
/* Indicate no valid break/continue context by setting these variables
to some non-null, non-label value. We'll notice and emit the proper
error message in c_finish_bc_stmt. */
c_break_label = c_cont_label = size_zero_node;
decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, true, NULL,
&attributes, NULL, NULL, DEPRECATED_NORMAL);
/* If the declarator is not suitable for a function definition,
cause a syntax error. */
if (decl1 == 0
|| TREE_CODE (decl1) != FUNCTION_DECL)
return 0;
loc = DECL_SOURCE_LOCATION (decl1);
c_decl_attributes (&decl1, attributes, 0);
if (DECL_DECLARED_INLINE_P (decl1)
&& DECL_UNINLINABLE (decl1)
&& lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1)))
warning_at (loc, OPT_Wattributes,
"inline function %qD given attribute noinline",
decl1);
/* Handle gnu_inline attribute. */
if (declspecs->inline_p
&& !flag_gnu89_inline
&& TREE_CODE (decl1) == FUNCTION_DECL
&& (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl1))
|| current_function_decl))
{
if (declspecs->storage_class != csc_static)
DECL_EXTERNAL (decl1) = !DECL_EXTERNAL (decl1);
}
announce_function (decl1);
if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1))))
{
error_at (loc, "return type is an incomplete type");
/* Make it return void instead. */
TREE_TYPE (decl1)
= build_function_type (void_type_node,
TYPE_ARG_TYPES (TREE_TYPE (decl1)));
}
if (warn_about_return_type)
warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int
: (warn_return_type ? OPT_Wreturn_type
: OPT_Wimplicit_int),
"return type defaults to %<int%>");
/* Make the init_value nonzero so pushdecl knows this is not tentative.
error_mark_node is replaced below (in pop_scope) with the BLOCK. */
DECL_INITIAL (decl1) = error_mark_node;
/* A nested function is not global. */
if (current_function_decl != 0)
TREE_PUBLIC (decl1) = 0;
/* If this definition isn't a prototype and we had a prototype declaration
before, copy the arg type info from that prototype. */
old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope);
if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL)
old_decl = 0;
current_function_prototype_locus = UNKNOWN_LOCATION;
current_function_prototype_built_in = false;
current_function_prototype_arg_types = NULL_TREE;
if (!prototype_p (TREE_TYPE (decl1)))
{
if (old_decl != 0 && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE
&& comptypes (TREE_TYPE (TREE_TYPE (decl1)),
TREE_TYPE (TREE_TYPE (old_decl))))
{
TREE_TYPE (decl1) = composite_type (TREE_TYPE (old_decl),
TREE_TYPE (decl1));
current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl);
current_function_prototype_built_in
= C_DECL_BUILTIN_PROTOTYPE (old_decl);
current_function_prototype_arg_types
= TYPE_ARG_TYPES (TREE_TYPE (decl1));
}
if (TREE_PUBLIC (decl1))
{
/* If there is an external prototype declaration of this
function, record its location but do not copy information
to this decl. This may be an invisible declaration
(built-in or in a scope which has finished) or simply
have more refined argument types than any declaration
found above. */
struct c_binding *b;
for (b = I_SYMBOL_BINDING (DECL_NAME (decl1)); b; b = b->shadowed)
if (B_IN_SCOPE (b, external_scope))
break;
if (b)
{
tree ext_decl, ext_type;
ext_decl = b->decl;
ext_type = b->u.type ? b->u.type : TREE_TYPE (ext_decl);
if (TREE_CODE (ext_type) == FUNCTION_TYPE
&& comptypes (TREE_TYPE (TREE_TYPE (decl1)),
TREE_TYPE (ext_type)))
{
current_function_prototype_locus
= DECL_SOURCE_LOCATION (ext_decl);
current_function_prototype_built_in
= C_DECL_BUILTIN_PROTOTYPE (ext_decl);
current_function_prototype_arg_types
= TYPE_ARG_TYPES (ext_type);
}
}
}
}
/* Optionally warn of old-fashioned def with no previous prototype. */
if (warn_strict_prototypes
&& old_decl != error_mark_node
&& !prototype_p (TREE_TYPE (decl1))
&& C_DECL_ISNT_PROTOTYPE (old_decl))
warning_at (loc, OPT_Wstrict_prototypes,
"function declaration isn%'t a prototype");
/* Optionally warn of any global def with no previous prototype. */
else if (warn_missing_prototypes
&& old_decl != error_mark_node
&& TREE_PUBLIC (decl1)
&& !MAIN_NAME_P (DECL_NAME (decl1))
&& C_DECL_ISNT_PROTOTYPE (old_decl)
&& !DECL_DECLARED_INLINE_P (decl1))
warning_at (loc, OPT_Wmissing_prototypes,
"no previous prototype for %qD", decl1);
/* Optionally warn of any def with no previous prototype
if the function has already been used. */
else if (warn_missing_prototypes
&& old_decl != 0
&& old_decl != error_mark_node
&& TREE_USED (old_decl)
&& !prototype_p (TREE_TYPE (old_decl)))
warning_at (loc, OPT_Wmissing_prototypes,
"%qD was used with no prototype before its definition", decl1);
/* Optionally warn of any global def with no previous declaration. */
else if (warn_missing_declarations
&& TREE_PUBLIC (decl1)
&& old_decl == 0
&& !MAIN_NAME_P (DECL_NAME (decl1))
&& !DECL_DECLARED_INLINE_P (decl1))
warning_at (loc, OPT_Wmissing_declarations,
"no previous declaration for %qD",
decl1);
/* Optionally warn of any def with no previous declaration
if the function has already been used. */
else if (warn_missing_declarations
&& old_decl != 0
&& old_decl != error_mark_node
&& TREE_USED (old_decl)
&& C_DECL_IMPLICIT (old_decl))
warning_at (loc, OPT_Wmissing_declarations,
"%qD was used with no declaration before its definition", decl1);
/* This function exists in static storage.
(This does not mean `static' in the C sense!) */
TREE_STATIC (decl1) = 1;
/* This is the earliest point at which we might know the assembler
name of the function. Thus, if it's set before this, die horribly. */
gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl1));
/* If #pragma weak was used, mark the decl weak now. */
if (current_scope == file_scope)
maybe_apply_pragma_weak (decl1);
/* Warn for unlikely, improbable, or stupid declarations of `main'. */
if (warn_main && MAIN_NAME_P (DECL_NAME (decl1)))
{
if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1)))
!= integer_type_node)
pedwarn (loc, OPT_Wmain, "return type of %qD is not %<int%>", decl1);
else if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (decl1))))
pedwarn (loc, OPT_Wmain, "%<_Atomic%>-qualified return type of %qD",
decl1);
check_main_parameter_types (decl1);
if (!TREE_PUBLIC (decl1))
pedwarn (loc, OPT_Wmain,
"%qD is normally a non-static function", decl1);
}
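/* Illustrative sketch (hypothetical code, not from this source tree) of
   declarations the -Wmain checks above complain about:

       float main (void);        return type of 'main' is not 'int'
       static int main (void);   'main' is normally a non-static function
       int main (float x);       check_main_parameter_types complains
                                 about the parameter type  */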
/* Record the decl so that the function name is defined.
If we already have a decl for this name, and it is a FUNCTION_DECL,
use the old decl. */
current_function_decl = pushdecl (decl1);
push_scope ();
declare_parm_level ();
restype = TREE_TYPE (TREE_TYPE (current_function_decl));
resdecl = build_decl (loc, RESULT_DECL, NULL_TREE, restype);
DECL_ARTIFICIAL (resdecl) = 1;
DECL_IGNORED_P (resdecl) = 1;
DECL_RESULT (current_function_decl) = resdecl;
start_fname_decls ();
return 1;
}
/* Subroutine of store_parm_decls which handles new-style function
definitions (prototype format). The parms already have decls, so we
need only record them as in effect and complain if any redundant
old-style parm decls were written. */
static void
store_parm_decls_newstyle (tree fndecl, const struct c_arg_info *arg_info)
{
tree decl;
c_arg_tag *tag;
unsigned ix;
if (current_scope->bindings)
{
error_at (DECL_SOURCE_LOCATION (fndecl),
"old-style parameter declarations in prototyped "
"function definition");
/* Get rid of the old-style declarations. */
pop_scope ();
push_scope ();
}
/* Don't issue this warning for nested functions, and don't issue this
warning if we got here because ARG_INFO_TYPES was error_mark_node
(this happens when a function definition has just an ellipsis in
its parameter list). */
else if (!in_system_header_at (input_location)
&& !current_function_scope
&& arg_info->types != error_mark_node)
warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wtraditional,
"traditional C rejects ISO C style function definitions");
/* Now make all the parameter declarations visible in the function body.
We can bypass most of the grunt work of pushdecl. */
for (decl = arg_info->parms; decl; decl = DECL_CHAIN (decl))
{
DECL_CONTEXT (decl) = current_function_decl;
if (DECL_NAME (decl))
{
bind (DECL_NAME (decl), decl, current_scope,
/*invisible=*/false, /*nested=*/false,
UNKNOWN_LOCATION);
if (!TREE_USED (decl))
warn_if_shadowing (decl);
}
else
error_at (DECL_SOURCE_LOCATION (decl), "parameter name omitted");
}
/* Record the parameter list in the function declaration. */
DECL_ARGUMENTS (fndecl) = arg_info->parms;
/* Now make all the ancillary declarations visible, likewise. */
for (decl = arg_info->others; decl; decl = DECL_CHAIN (decl))
{
DECL_CONTEXT (decl) = current_function_decl;
if (DECL_NAME (decl))
bind (DECL_NAME (decl), decl, current_scope,
/*invisible=*/false,
/*nested=*/(TREE_CODE (decl) == FUNCTION_DECL),
UNKNOWN_LOCATION);
}
/* And all the tag declarations. */
FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag)
if (tag->id)
bind (tag->id, tag->type, current_scope,
/*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION);
}
/* Subroutine of store_parm_decls which handles old-style function
definitions (separate parameter list and declarations). */
static void
store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info)
{
struct c_binding *b;
tree parm, decl, last;
tree parmids = arg_info->parms;
hash_set<tree> seen_args;
if (!in_system_header_at (input_location))
warning_at (DECL_SOURCE_LOCATION (fndecl),
OPT_Wold_style_definition, "old-style function definition");
/* Match each formal parameter name with its declaration. Save each
decl in the appropriate TREE_PURPOSE slot of the parmids chain. */
for (parm = parmids; parm; parm = TREE_CHAIN (parm))
{
if (TREE_VALUE (parm) == 0)
{
error_at (DECL_SOURCE_LOCATION (fndecl),
"parameter name missing from parameter list");
TREE_PURPOSE (parm) = 0;
continue;
}
b = I_SYMBOL_BINDING (TREE_VALUE (parm));
if (b && B_IN_CURRENT_SCOPE (b))
{
decl = b->decl;
/* Skip erroneous parameters. */
if (decl == error_mark_node)
continue;
/* If we got something other than a PARM_DECL it is an error. */
if (TREE_CODE (decl) != PARM_DECL)
error_at (DECL_SOURCE_LOCATION (decl),
"%qD declared as a non-parameter", decl);
/* If the declaration is already marked, we have a duplicate
name. Complain and ignore the duplicate. */
else if (seen_args.contains (decl))
{
error_at (DECL_SOURCE_LOCATION (decl),
"multiple parameters named %qD", decl);
TREE_PURPOSE (parm) = 0;
continue;
}
/* If the declaration says "void", complain and turn it into
an int. */
else if (VOID_TYPE_P (TREE_TYPE (decl)))
{
error_at (DECL_SOURCE_LOCATION (decl),
"parameter %qD declared with void type", decl);
TREE_TYPE (decl) = integer_type_node;
DECL_ARG_TYPE (decl) = integer_type_node;
layout_decl (decl, 0);
}
warn_if_shadowing (decl);
}
/* If no declaration found, default to int. */
else
{
/* FIXME diagnostics: This should be the location of the argument,
not the FNDECL. E.g., for an old-style declaration
int f10(v) { blah; }
We should use the location of the V, not the F10.
Unfortunately, the V is an IDENTIFIER_NODE which has no
location. In the future we need locations for c_arg_info
entries.
See gcc.dg/Wshadow-3.c for an example of this problem. */
decl = build_decl (DECL_SOURCE_LOCATION (fndecl),
PARM_DECL, TREE_VALUE (parm), integer_type_node);
DECL_ARG_TYPE (decl) = TREE_TYPE (decl);
pushdecl (decl);
warn_if_shadowing (decl);
if (flag_isoc99)
pedwarn (DECL_SOURCE_LOCATION (decl),
OPT_Wimplicit_int, "type of %qD defaults to %<int%>",
decl);
else
warning_at (DECL_SOURCE_LOCATION (decl),
OPT_Wmissing_parameter_type,
"type of %qD defaults to %<int%>", decl);
}
TREE_PURPOSE (parm) = decl;
seen_args.add (decl);
}
/* Now examine the parms chain for incomplete declarations
and declarations with no corresponding names. */
for (b = current_scope->bindings; b; b = b->prev)
{
parm = b->decl;
if (TREE_CODE (parm) != PARM_DECL)
continue;
if (TREE_TYPE (parm) != error_mark_node
&& !COMPLETE_TYPE_P (TREE_TYPE (parm)))
{
error_at (DECL_SOURCE_LOCATION (parm),
"parameter %qD has incomplete type", parm);
TREE_TYPE (parm) = error_mark_node;
}
if (!seen_args.contains (parm))
{
error_at (DECL_SOURCE_LOCATION (parm),
"declaration for parameter %qD but no such parameter",
parm);
/* Pretend the parameter was not missing.
This gets us to a standard state and minimizes
further error messages. */
parmids = chainon (parmids, tree_cons (parm, 0, 0));
}
}
/* Chain the declarations together in the order of the list of
names. Store that chain in the function decl, replacing the
list of names. Update the current scope to match. */
DECL_ARGUMENTS (fndecl) = 0;
for (parm = parmids; parm; parm = TREE_CHAIN (parm))
if (TREE_PURPOSE (parm))
break;
if (parm && TREE_PURPOSE (parm))
{
last = TREE_PURPOSE (parm);
DECL_ARGUMENTS (fndecl) = last;
for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm))
if (TREE_PURPOSE (parm))
{
DECL_CHAIN (last) = TREE_PURPOSE (parm);
last = TREE_PURPOSE (parm);
}
DECL_CHAIN (last) = 0;
}
/* If there was a previous prototype,
set the DECL_ARG_TYPE of each argument according to
the type previously specified, and report any mismatches. */
if (current_function_prototype_arg_types)
{
tree type;
for (parm = DECL_ARGUMENTS (fndecl),
type = current_function_prototype_arg_types;
parm || (type && TREE_VALUE (type) != error_mark_node
&& (TYPE_MAIN_VARIANT (TREE_VALUE (type)) != void_type_node));
parm = DECL_CHAIN (parm), type = TREE_CHAIN (type))
{
if (parm == 0 || type == 0
|| TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node)
{
if (current_function_prototype_built_in)
warning_at (DECL_SOURCE_LOCATION (fndecl),
0, "number of arguments doesn%'t match "
"built-in prototype");
else
{
/* FIXME diagnostics: This should be the location of
FNDECL, but there is a bug when a prototype is
declared inside function context, but defined
outside of it (e.g., gcc.dg/pr15698-2.c). In
which case FNDECL gets the location of the
prototype, not the definition. */
error_at (input_location,
"number of arguments doesn%'t match prototype");
error_at (current_function_prototype_locus,
"prototype declaration");
}
break;
}
/* Type for passing arg must be consistent with that
declared for the arg. ISO C says we take the unqualified
type for parameters declared with qualified type. */
if (TREE_TYPE (parm) != error_mark_node
&& TREE_TYPE (type) != error_mark_node
&& ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
!= TYPE_ATOMIC (TREE_VALUE (type)))
|| !comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)),
TYPE_MAIN_VARIANT (TREE_VALUE (type)))))
{
if ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
== TYPE_ATOMIC (TREE_VALUE (type)))
&& (TYPE_MAIN_VARIANT (TREE_TYPE (parm))
== TYPE_MAIN_VARIANT (TREE_VALUE (type))))
{
/* Adjust argument to match prototype. E.g. a previous
`int foo(float);' prototype causes
`int foo(x) float x; {...}' to be treated like
`int foo(float x) {...}'. This is particularly
useful for argument types like uid_t. */
DECL_ARG_TYPE (parm) = TREE_TYPE (parm);
if (targetm.calls.promote_prototypes (TREE_TYPE (current_function_decl))
&& INTEGRAL_TYPE_P (TREE_TYPE (parm))
&& TYPE_PRECISION (TREE_TYPE (parm))
< TYPE_PRECISION (integer_type_node))
DECL_ARG_TYPE (parm)
= c_type_promotes_to (TREE_TYPE (parm));
/* ??? Is it possible to get here with a
built-in prototype or will it always have
been diagnosed as conflicting with an
old-style definition and discarded? */
if (current_function_prototype_built_in)
warning_at (DECL_SOURCE_LOCATION (parm),
OPT_Wpedantic, "promoted argument %qD "
"doesn%'t match built-in prototype", parm);
else
{
pedwarn (DECL_SOURCE_LOCATION (parm),
OPT_Wpedantic, "promoted argument %qD "
"doesn%'t match prototype", parm);
pedwarn (current_function_prototype_locus, OPT_Wpedantic,
"prototype declaration");
}
}
else
{
if (current_function_prototype_built_in)
warning_at (DECL_SOURCE_LOCATION (parm),
0, "argument %qD doesn%'t match "
"built-in prototype", parm);
else
{
error_at (DECL_SOURCE_LOCATION (parm),
"argument %qD doesn%'t match prototype", parm);
error_at (current_function_prototype_locus,
"prototype declaration");
}
}
}
}
TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = 0;
}
/* Otherwise, create a prototype that would match. */
else
{
tree actual = 0, last = 0, type;
for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
{
type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE);
if (last)
TREE_CHAIN (last) = type;
else
actual = type;
last = type;
}
type = tree_cons (NULL_TREE, void_type_node, NULL_TREE);
if (last)
TREE_CHAIN (last) = type;
else
actual = type;
/* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES
of the type of this function, but we need to avoid having this
affect the types of other similarly-typed functions, so we must
first force the generation of an identical (but separate) type
node for the relevant function type. The new node we create
will be a variant of the main variant of the original function
type. */
TREE_TYPE (fndecl) = build_variant_type_copy (TREE_TYPE (fndecl));
TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual;
}
}
/* Store parameter declarations passed in ARG_INFO into the current
function declaration. */
void
store_parm_decls_from (struct c_arg_info *arg_info)
{
current_function_arg_info = arg_info;
store_parm_decls ();
}
/* Store the parameter declarations into the current function declaration.
This is called after parsing the parameter declarations, before
digesting the body of the function.
For an old-style definition, construct a prototype out of the old-style
parameter declarations and inject it into the function's type. */
void
store_parm_decls (void)
{
tree fndecl = current_function_decl;
bool proto;
/* The argument information block for FNDECL. */
struct c_arg_info *arg_info = current_function_arg_info;
current_function_arg_info = 0;
/* True if this definition is written with a prototype. Note:
despite C99 6.7.5.3p14, we can *not* treat an empty argument
list in a function definition as equivalent to (void) -- an
empty argument list specifies the function has no parameters,
but only (void) sets up a prototype for future calls. */
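/* Illustrative sketch (hypothetical code, not from this source tree) of
   the distinction drawn above:

       int f () { return 0; }      definition with no parameters, but NOT
                                   a prototype: later calls are unchecked
       int g (void) { return 0; }  prototype: later calls are checked  */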
proto = arg_info->types != 0;
if (proto)
store_parm_decls_newstyle (fndecl, arg_info);
else
store_parm_decls_oldstyle (fndecl, arg_info);
/* The next call to push_scope will be a function body. */
next_is_function_body = true;
/* Write a record describing this function definition to the prototypes
file (if requested). */
gen_aux_info_record (fndecl, 1, 0, proto);
/* Initialize the RTL code for the function. */
allocate_struct_function (fndecl, false);
if (warn_unused_local_typedefs)
cfun->language = ggc_cleared_alloc<language_function> ();
/* Begin the statement tree for this function. */
DECL_SAVED_TREE (fndecl) = push_stmt_list ();
/* ??? Insert the contents of the pending sizes list into the function
to be evaluated. The only reason left to have this is
void foo(int n, int array[n++])
because we throw away the array type in favor of a pointer type, and
thus won't naturally see the SAVE_EXPR containing the increment. All
other pending sizes would be handled by gimplify_parameters. */
if (arg_info->pending_sizes)
add_stmt (arg_info->pending_sizes);
}
/* Store PARM_DECLs in PARMS into scope temporarily. Used for
c_finish_omp_declare_simd for function prototypes. No diagnostics
should be done. */
void
temp_store_parm_decls (tree fndecl, tree parms)
{
push_scope ();
for (tree p = parms; p; p = DECL_CHAIN (p))
{
DECL_CONTEXT (p) = fndecl;
if (DECL_NAME (p))
bind (DECL_NAME (p), p, current_scope,
/*invisible=*/false, /*nested=*/false,
UNKNOWN_LOCATION);
}
}
/* Undo what temp_store_parm_decls did. */
void
temp_pop_parm_decls (void)
{
/* Clear all bindings in this temporary scope, so that
pop_scope doesn't create a BLOCK. */
struct c_binding *b = current_scope->bindings;
current_scope->bindings = NULL;
for (; b; b = free_binding_and_advance (b))
{
gcc_assert (TREE_CODE (b->decl) == PARM_DECL);
gcc_assert (I_SYMBOL_BINDING (b->id) == b);
I_SYMBOL_BINDING (b->id) = b->shadowed;
if (b->shadowed && b->shadowed->u.type)
TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type;
}
pop_scope ();
}
/* Finish up a function declaration and compile that function
all the way to assembler language output. Then free the storage
for the function definition.
This is called after parsing the body of the function definition. */
void
finish_function (void)
{
tree fndecl = current_function_decl;
if (c_dialect_objc ())
objc_finish_function ();
if (TREE_CODE (fndecl) == FUNCTION_DECL
&& targetm.calls.promote_prototypes (TREE_TYPE (fndecl)))
{
tree args = DECL_ARGUMENTS (fndecl);
for (; args; args = DECL_CHAIN (args))
{
tree type = TREE_TYPE (args);
if (INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node))
DECL_ARG_TYPE (args) = c_type_promotes_to (type);
}
}
if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node)
BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
/* Must mark the RESULT_DECL as being in this function. */
if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node)
DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted
&& TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl)))
== integer_type_node && flag_isoc99)
{
/* Hack. We don't want the middle-end to warn that this return
is unreachable, so we mark its location as special. Using
UNKNOWN_LOCATION has the problem that it gets clobbered in
annotate_one_with_locus. A cleaner solution might be to
ensure ! should_carry_locus_p (stmt), but that needs a flag.
*/
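/* Illustrative example (hypothetical user code): for a hosted C99
program such as
int main (void) { puts ("hello"); }
the call below injects the implicit "return 0;" required by
C99 5.1.2.2.3. */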
c_finish_return (BUILTINS_LOCATION, integer_zero_node, NULL_TREE);
}
/* Tie off the statement tree for this function. */
DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl));
/* If the function has a _Cilk_spawn in front of a function call inside
it, i.e. it is a spawning function, then add the appropriate Cilk Plus
functions inside. */
if (fn_contains_cilk_spawn_p (cfun))
cfun->cilk_frame_decl = insert_cilk_frame (fndecl);
finish_fname_decls ();
/* Complain if there's just no return statement. */
if (warn_return_type
&& TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE
&& !current_function_returns_value && !current_function_returns_null
/* Don't complain if we are no-return. */
&& !current_function_returns_abnormally
/* Don't complain if we are declared noreturn. */
&& !TREE_THIS_VOLATILE (fndecl)
/* Don't warn for main(). */
&& !MAIN_NAME_P (DECL_NAME (fndecl))
/* Or if they didn't actually specify a return type. */
&& !C_FUNCTION_IMPLICIT_INT (fndecl)
/* Normally, with -Wreturn-type, flow will complain, but we might
optimize out static functions. */
&& !TREE_PUBLIC (fndecl))
{
warning (OPT_Wreturn_type,
"no return statement in function returning non-void");
TREE_NO_WARNING (fndecl) = 1;
}
/* Complain about parameters that are only set, but never otherwise used. */
if (warn_unused_but_set_parameter)
{
tree decl;
for (decl = DECL_ARGUMENTS (fndecl);
decl;
decl = DECL_CHAIN (decl))
if (TREE_USED (decl)
&& TREE_CODE (decl) == PARM_DECL
&& !DECL_READ_P (decl)
&& DECL_NAME (decl)
&& !DECL_ARTIFICIAL (decl)
&& !TREE_NO_WARNING (decl))
warning_at (DECL_SOURCE_LOCATION (decl),
OPT_Wunused_but_set_parameter,
"parameter %qD set but not used", decl);
}
/* Complain about locally defined typedefs that are not used in this
function. */
maybe_warn_unused_local_typedefs ();
/* Store the end of the function, so that we get good line number
info for the epilogue. */
cfun->function_end_locus = input_location;
/* Finalize the ELF visibility for the function. */
c_determine_visibility (fndecl);
/* For GNU C extern inline functions disregard inline limits. */
if (DECL_EXTERNAL (fndecl)
&& DECL_DECLARED_INLINE_P (fndecl))
DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1;
/* Genericize before inlining. Delay genericizing nested functions
until their parent function is genericized. Since finalizing
requires GENERIC, delay that as well. */
if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node
&& !undef_nested_function)
{
if (!decl_function_context (fndecl))
{
invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl);
c_genericize (fndecl);
/* ??? Objc emits functions after finalizing the compilation unit.
This should be cleaned up later and this conditional removed. */
if (symtab->global_info_ready)
{
cgraph_node::add_new_function (fndecl, false);
return;
}
cgraph_node::finalize_function (fndecl, false);
}
else
{
/* Register this function with cgraph just far enough to get it
added to our parent's nested function list. Handy, since the
C front end doesn't have such a list. */
(void) cgraph_node::get_create (fndecl);
}
}
if (!decl_function_context (fndecl))
undef_nested_function = false;
if (cfun->language != NULL)
{
ggc_free (cfun->language);
cfun->language = NULL;
}
/* We're leaving the context of this function, so zap cfun.
It's still in DECL_STRUCT_FUNCTION, and we'll restore it in
tree_rest_of_compilation. */
set_cfun (NULL);
current_function_decl = NULL;
}
/* Check the declarations given in a for-loop for satisfying the C99
constraints. If exactly one such decl is found, return it. LOC is
the location of the opening parenthesis of the for loop. The last
parameter allows you to control whether the "for loop initial declarations
are only allowed in C99 mode" error is issued. Normally, you should pass
flag_isoc99 as that parameter. But in some cases (Objective-C
foreach loop, for example) we want to run the checks in this
function even if not in C99 mode, so we allow the caller to turn
off the error about not being in C99 mode.
*/
tree
check_for_loop_decls (location_t loc, bool turn_off_iso_c99_error)
{
struct c_binding *b;
tree one_decl = NULL_TREE;
int n_decls = 0;
if (!turn_off_iso_c99_error)
{
static bool hint = true;
/* If we get here, declarations have been used in a for loop without
the C99 for loop scope. This doesn't make much sense, so don't
allow it. */
error_at (loc, "%<for%> loop initial declarations "
"are only allowed in C99 or C11 mode");
if (hint)
{
inform (loc,
"use option -std=c99, -std=gnu99, -std=c11 or -std=gnu11 "
"to compile your code");
hint = false;
}
return NULL_TREE;
}
/* C99 subclause 6.8.5 paragraph 3:
[#3] The declaration part of a for statement shall only
declare identifiers for objects having storage class auto or
register.
It isn't clear whether, in this sentence, "identifiers" binds to
"shall only declare" or to "objects" - that is, whether all identifiers
declared must be identifiers for objects, or whether the restriction
only applies to those that are. (A question on this in comp.std.c
in November 2000 received no answer.) We implement the strictest
interpretation, to avoid creating an extension which later causes
problems. */
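/* Illustrative examples (hypothetical user code) of what the checks
below accept and reject:
for (int i = 0; i < n; i++)         OK: object with automatic storage
for (static int i = 0; i < n; i++)  error: static storage
for (extern int j; j < n; j++)      error: extern
for (enum e { A, B } v = A; v < B; v++)  error: declares the tag "e" */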
for (b = current_scope->bindings; b; b = b->prev)
{
tree id = b->id;
tree decl = b->decl;
if (!id)
continue;
switch (TREE_CODE (decl))
{
case VAR_DECL:
{
location_t decl_loc = DECL_SOURCE_LOCATION (decl);
if (TREE_STATIC (decl))
error_at (decl_loc,
"declaration of static variable %qD in %<for%> loop "
"initial declaration", decl);
else if (DECL_EXTERNAL (decl))
error_at (decl_loc,
"declaration of %<extern%> variable %qD in %<for%> loop "
"initial declaration", decl);
}
break;
case RECORD_TYPE:
error_at (loc,
"%<struct %E%> declared in %<for%> loop initial "
"declaration", id);
break;
case UNION_TYPE:
error_at (loc,
"%<union %E%> declared in %<for%> loop initial declaration",
id);
break;
case ENUMERAL_TYPE:
error_at (loc, "%<enum %E%> declared in %<for%> loop "
"initial declaration", id);
break;
default:
error_at (loc, "declaration of non-variable "
"%qD in %<for%> loop initial declaration", decl);
}
n_decls++;
one_decl = decl;
}
return n_decls == 1 ? one_decl : NULL_TREE;
}
/* Save and reinitialize the variables
used during compilation of a C function. */
void
c_push_function_context (void)
{
struct language_function *p = cfun->language;
/* cfun->language might already have been allocated by the use of
-Wunused-local-typedefs. In that case, just re-use it. */
if (p == NULL)
cfun->language = p = ggc_cleared_alloc<language_function> ();
p->base.x_stmt_tree = c_stmt_tree;
c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list);
p->x_break_label = c_break_label;
p->x_cont_label = c_cont_label;
p->x_switch_stack = c_switch_stack;
p->arg_info = current_function_arg_info;
p->returns_value = current_function_returns_value;
p->returns_null = current_function_returns_null;
p->returns_abnormally = current_function_returns_abnormally;
p->warn_about_return_type = warn_about_return_type;
push_function_context ();
}
/* Restore the variables used during compilation of a C function. */
void
c_pop_function_context (void)
{
struct language_function *p;
pop_function_context ();
p = cfun->language;
/* When -Wunused-local-typedefs is in effect, cfun->language is
used to store data throughout the lifetime of the current cfun,
so don't deallocate it. */
if (!warn_unused_local_typedefs)
cfun->language = NULL;
if (DECL_STRUCT_FUNCTION (current_function_decl) == 0
&& DECL_SAVED_TREE (current_function_decl) == NULL_TREE)
{
/* Stop pointing to the local nodes about to be freed. */
/* But DECL_INITIAL must remain nonzero so we know this
was an actual function definition. */
DECL_INITIAL (current_function_decl) = error_mark_node;
DECL_ARGUMENTS (current_function_decl) = 0;
}
c_stmt_tree = p->base.x_stmt_tree;
p->base.x_stmt_tree.x_cur_stmt_list = NULL;
c_break_label = p->x_break_label;
c_cont_label = p->x_cont_label;
c_switch_stack = p->x_switch_stack;
current_function_arg_info = p->arg_info;
current_function_returns_value = p->returns_value;
current_function_returns_null = p->returns_null;
current_function_returns_abnormally = p->returns_abnormally;
warn_about_return_type = p->warn_about_return_type;
}
/* The functions below are required for the functionality of processing
a function all at once in the C front end. Currently these
functions are not called from anywhere in the C front end, but as
these changes continue, that will change. */
/* Returns the stmt_tree (if any) to which statements are currently
being added. If there is no active statement-tree, NULL is
returned. */
stmt_tree
current_stmt_tree (void)
{
return &c_stmt_tree;
}
/* Return the global value of T as a symbol. */
tree
identifier_global_value (tree t)
{
struct c_binding *b;
for (b = I_SYMBOL_BINDING (t); b; b = b->shadowed)
if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b))
return b->decl;
return 0;
}
/* In C, the only C-linkage public declaration is at file scope. */
tree
c_linkage_bindings (tree name)
{
return identifier_global_value (name);
}
/* Record a builtin type for C. If NAME is non-NULL, it is the name used;
otherwise the name is found in ridpointers from RID_INDEX. */
void
record_builtin_type (enum rid rid_index, const char *name, tree type)
{
tree id, decl;
if (name == 0)
id = ridpointers[(int) rid_index];
else
id = get_identifier (name);
decl = build_decl (UNKNOWN_LOCATION, TYPE_DECL, id, type);
pushdecl (decl);
if (debug_hooks->type_decl)
debug_hooks->type_decl (decl, false);
}
/* Build the void_list_node (void_type_node having been created). */
tree
build_void_list_node (void)
{
tree t = build_tree_list (NULL_TREE, void_type_node);
return t;
}
/* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR. */
struct c_parm *
build_c_parm (struct c_declspecs *specs, tree attrs,
struct c_declarator *declarator)
{
struct c_parm *ret = XOBNEW (&parser_obstack, struct c_parm);
ret->specs = specs;
ret->attrs = attrs;
ret->declarator = declarator;
return ret;
}
/* Return a declarator with nested attributes. TARGET is the inner
declarator to which these attributes apply. ATTRS are the
attributes. */
struct c_declarator *
build_attrs_declarator (tree attrs, struct c_declarator *target)
{
struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
ret->kind = cdk_attrs;
ret->declarator = target;
ret->u.attrs = attrs;
return ret;
}
/* Return a declarator for a function with arguments specified by ARGS
and return type specified by TARGET. */
struct c_declarator *
build_function_declarator (struct c_arg_info *args,
struct c_declarator *target)
{
struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
ret->kind = cdk_function;
ret->declarator = target;
ret->u.arg_info = args;
return ret;
}
/* Return a declarator for the identifier IDENT (which may be
NULL_TREE for an abstract declarator). */
struct c_declarator *
build_id_declarator (tree ident)
{
struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
ret->kind = cdk_id;
ret->declarator = 0;
ret->u.id = ident;
/* Default value - may get reset to a more precise location. */
ret->id_loc = input_location;
return ret;
}
/* Return something to represent absolute declarators containing a *.
TARGET is the absolute declarator that the * contains.
TYPE_QUALS_ATTRS is a structure for type qualifiers and attributes
to apply to the pointer type. */
struct c_declarator *
make_pointer_declarator (struct c_declspecs *type_quals_attrs,
struct c_declarator *target)
{
tree attrs;
int quals = 0;
struct c_declarator *itarget = target;
struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator);
if (type_quals_attrs)
{
attrs = type_quals_attrs->attrs;
quals = quals_from_declspecs (type_quals_attrs);
if (attrs != NULL_TREE)
itarget = build_attrs_declarator (attrs, target);
}
ret->kind = cdk_pointer;
ret->declarator = itarget;
ret->u.pointer_quals = quals;
return ret;
}
/* Return a pointer to a structure for an empty list of declaration
specifiers. */
struct c_declspecs *
build_null_declspecs (void)
{
struct c_declspecs *ret = XOBNEW (&parser_obstack, struct c_declspecs);
memset (&ret->locations, 0, sizeof (ret->locations));
ret->type = 0;
ret->expr = 0;
ret->decl_attr = 0;
ret->attrs = 0;
ret->align_log = -1;
ret->typespec_word = cts_none;
ret->storage_class = csc_none;
ret->expr_const_operands = true;
ret->declspecs_seen_p = false;
ret->typespec_kind = ctsk_none;
ret->non_sc_seen_p = false;
ret->typedef_p = false;
ret->explicit_signed_p = false;
ret->deprecated_p = false;
ret->default_int_p = false;
ret->long_p = false;
ret->long_long_p = false;
ret->short_p = false;
ret->signed_p = false;
ret->unsigned_p = false;
ret->complex_p = false;
ret->inline_p = false;
ret->noreturn_p = false;
ret->thread_p = false;
ret->thread_gnu_p = false;
ret->const_p = false;
ret->volatile_p = false;
ret->atomic_p = false;
ret->restrict_p = false;
ret->saturating_p = false;
ret->alignas_p = false;
ret->address_space = ADDR_SPACE_GENERIC;
return ret;
}
/* Add the address space AS to the declaration specifiers
SPECS, returning SPECS. */
struct c_declspecs *
declspecs_add_addrspace (source_location location,
struct c_declspecs *specs, addr_space_t as)
{
specs->non_sc_seen_p = true;
specs->declspecs_seen_p = true;
if (!ADDR_SPACE_GENERIC_P (specs->address_space)
&& specs->address_space != as)
error ("incompatible address space qualifiers %qs and %qs",
c_addr_space_name (as),
c_addr_space_name (specs->address_space));
else
{
specs->address_space = as;
specs->locations[cdw_address_space] = location;
}
return specs;
}
/* Add the type qualifier QUAL to the declaration specifiers SPECS,
returning SPECS. */
struct c_declspecs *
declspecs_add_qual (source_location loc,
struct c_declspecs *specs, tree qual)
{
enum rid i;
bool dupe = false;
specs->non_sc_seen_p = true;
specs->declspecs_seen_p = true;
gcc_assert (TREE_CODE (qual) == IDENTIFIER_NODE
&& C_IS_RESERVED_WORD (qual));
i = C_RID_CODE (qual);
switch (i)
{
case RID_CONST:
dupe = specs->const_p;
specs->const_p = true;
specs->locations[cdw_const] = loc;
break;
case RID_VOLATILE:
dupe = specs->volatile_p;
specs->volatile_p = true;
specs->locations[cdw_volatile] = loc;
break;
case RID_RESTRICT:
dupe = specs->restrict_p;
specs->restrict_p = true;
specs->locations[cdw_restrict] = loc;
break;
case RID_ATOMIC:
dupe = specs->atomic_p;
specs->atomic_p = true;
break;
default:
gcc_unreachable ();
}
if (dupe)
pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %qE", qual);
return specs;
}
/* Add the type specifier TYPE to the declaration specifiers SPECS,
returning SPECS. */
struct c_declspecs *
declspecs_add_type (location_t loc, struct c_declspecs *specs,
struct c_typespec spec)
{
tree type = spec.spec;
specs->non_sc_seen_p = true;
specs->declspecs_seen_p = true;
specs->typespec_kind = spec.kind;
if (TREE_DEPRECATED (type))
specs->deprecated_p = true;
/* Handle type specifier keywords. */
if (TREE_CODE (type) == IDENTIFIER_NODE
&& C_IS_RESERVED_WORD (type)
&& C_RID_CODE (type) != RID_CXX_COMPAT_WARN)
{
enum rid i = C_RID_CODE (type);
if (specs->type)
{
error_at (loc, "two or more data types in declaration specifiers");
return specs;
}
if ((int) i <= (int) RID_LAST_MODIFIER)
{
/* "long", "short", "signed", "unsigned", "_Complex" or "_Sat". */
bool dupe = false;
switch (i)
{
case RID_LONG:
if (specs->long_long_p)
{
error_at (loc, "%<long long long%> is too long for GCC");
break;
}
if (specs->long_p)
{
if (specs->typespec_word == cts_double)
{
error_at (loc,
("both %<long long%> and %<double%> in "
"declaration specifiers"));
break;
}
pedwarn_c90 (loc, OPT_Wlong_long,
"ISO C90 does not support %<long long%>");
specs->long_long_p = 1;
specs->locations[cdw_long_long] = loc;
break;
}
if (specs->short_p)
error_at (loc,
("both %<long%> and %<short%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<long%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<long%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int_n)
error_at (loc,
("both %<long%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<long%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<long%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<long%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<long%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<long%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<long%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->long_p = true;
specs->locations[cdw_long] = loc;
}
break;
case RID_SHORT:
dupe = specs->short_p;
if (specs->long_p)
error_at (loc,
("both %<long%> and %<short%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<short%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<short%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int_n)
error_at (loc,
("both %<short%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<short%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<short%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<short%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<short%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<short%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<short%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<short%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->short_p = true;
specs->locations[cdw_short] = loc;
}
break;
case RID_SIGNED:
dupe = specs->signed_p;
if (specs->unsigned_p)
error_at (loc,
("both %<signed%> and %<unsigned%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<signed%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<signed%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<signed%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<signed%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<signed%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<signed%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<signed%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<signed%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->signed_p = true;
specs->locations[cdw_signed] = loc;
}
break;
case RID_UNSIGNED:
dupe = specs->unsigned_p;
if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<unsigned%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<unsigned%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<unsigned%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<unsigned%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<unsigned%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<unsigned%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<unsigned%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<unsigned%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<unsigned%> and %<_Decimal128%> in "
"declaration specifiers"));
else
{
specs->unsigned_p = true;
specs->locations[cdw_unsigned] = loc;
}
break;
case RID_COMPLEX:
dupe = specs->complex_p;
if (!in_system_header_at (loc))
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support complex types");
if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<complex%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<complex%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<complex%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<complex%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<complex%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<complex%> and %<_Decimal128%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_fract)
error_at (loc,
("both %<complex%> and %<_Fract%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_accum)
error_at (loc,
("both %<complex%> and %<_Accum%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<complex%> and %<_Sat%> in "
"declaration specifiers"));
else
{
specs->complex_p = true;
specs->locations[cdw_complex] = loc;
}
break;
case RID_SAT:
dupe = specs->saturating_p;
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support saturating types");
if (specs->typespec_word == cts_int_n)
{
error_at (loc,
("both %<_Sat%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
}
else if (specs->typespec_word == cts_auto_type)
error_at (loc,
("both %<_Sat%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_void)
error_at (loc,
("both %<_Sat%> and %<void%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_bool)
error_at (loc,
("both %<_Sat%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_char)
error_at (loc,
("both %<_Sat%> and %<char%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_int)
error_at (loc,
("both %<_Sat%> and %<int%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_float)
error_at (loc,
("both %<_Sat%> and %<float%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_double)
error_at (loc,
("both %<_Sat%> and %<double%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat32)
error_at (loc,
("both %<_Sat%> and %<_Decimal32%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat64)
error_at (loc,
("both %<_Sat%> and %<_Decimal64%> in "
"declaration specifiers"));
else if (specs->typespec_word == cts_dfloat128)
error_at (loc,
("both %<_Sat%> and %<_Decimal128%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<_Sat%> and %<complex%> in "
"declaration specifiers"));
else
{
specs->saturating_p = true;
specs->locations[cdw_saturating] = loc;
}
break;
default:
gcc_unreachable ();
}
if (dupe)
error_at (loc, "duplicate %qE", type);
return specs;
}
else
{
/* "void", "_Bool", "char", "int", "float", "double", "_Decimal32",
"__intN", "_Decimal64", "_Decimal128", "_Fract", "_Accum" or
"__auto_type". */
if (specs->typespec_word != cts_none)
{
error_at (loc,
"two or more data types in declaration specifiers");
return specs;
}
switch (i)
{
case RID_AUTO_TYPE:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<__auto_type%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<__auto_type%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_auto_type;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_INT_N_0:
case RID_INT_N_1:
case RID_INT_N_2:
case RID_INT_N_3:
specs->int_n_idx = i - RID_INT_N_0;
if (!in_system_header_at (input_location))
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support %<__int%d%> types",
int_n_data[specs->int_n_idx].bitsize);
if (specs->long_p)
error_at (loc,
("both %<__int%d%> and %<long%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<__int%d%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (specs->short_p)
error_at (loc,
("both %<__int%d%> and %<short%> in "
"declaration specifiers"),
int_n_data[specs->int_n_idx].bitsize);
else if (! int_n_enabled_p [specs->int_n_idx])
error_at (loc,
"%<__int%d%> is not supported on this target",
int_n_data[specs->int_n_idx].bitsize);
else
{
specs->typespec_word = cts_int_n;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_VOID:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<void%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<void%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<void%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<void%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<void%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<void%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_void;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_BOOL:
if (!in_system_header_at (loc))
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support boolean types");
if (specs->long_p)
error_at (loc,
("both %<long%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<_Bool%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<_Bool%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_bool;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_CHAR:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<char%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<char%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<char%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_char;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_INT:
if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<int%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_int;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_FLOAT:
if (specs->long_p)
error_at (loc,
("both %<long%> and %<float%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<float%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<float%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<float%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<float%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_float;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_DOUBLE:
if (specs->long_long_p)
error_at (loc,
("both %<long long%> and %<double%> in "
"declaration specifiers"));
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<double%> in "
"declaration specifiers"));
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<double%> in "
"declaration specifiers"));
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<double%> in "
"declaration specifiers"));
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<double%> in "
"declaration specifiers"));
else
{
specs->typespec_word = cts_double;
specs->locations[cdw_typespec] = loc;
}
return specs;
case RID_DFLOAT32:
case RID_DFLOAT64:
case RID_DFLOAT128:
{
const char *str;
if (i == RID_DFLOAT32)
str = "_Decimal32";
else if (i == RID_DFLOAT64)
str = "_Decimal64";
else
str = "_Decimal128";
if (specs->long_long_p)
error_at (loc,
("both %<long long%> and %<%s%> in "
"declaration specifiers"),
str);
if (specs->long_p)
error_at (loc,
("both %<long%> and %<%s%> in "
"declaration specifiers"),
str);
else if (specs->short_p)
error_at (loc,
("both %<short%> and %<%s%> in "
"declaration specifiers"),
str);
else if (specs->signed_p)
error_at (loc,
("both %<signed%> and %<%s%> in "
"declaration specifiers"),
str);
else if (specs->unsigned_p)
error_at (loc,
("both %<unsigned%> and %<%s%> in "
"declaration specifiers"),
str);
else if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<%s%> in "
"declaration specifiers"),
str);
else if (specs->saturating_p)
error_at (loc,
("both %<_Sat%> and %<%s%> in "
"declaration specifiers"),
str);
else if (i == RID_DFLOAT32)
specs->typespec_word = cts_dfloat32;
else if (i == RID_DFLOAT64)
specs->typespec_word = cts_dfloat64;
else
specs->typespec_word = cts_dfloat128;
specs->locations[cdw_typespec] = loc;
}
if (!targetm.decimal_float_supported_p ())
error_at (loc,
("decimal floating point not supported "
"for this target"));
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support decimal floating point");
return specs;
case RID_FRACT:
case RID_ACCUM:
{
const char *str;
if (i == RID_FRACT)
str = "_Fract";
else
str = "_Accum";
if (specs->complex_p)
error_at (loc,
("both %<complex%> and %<%s%> in "
"declaration specifiers"),
str);
else if (i == RID_FRACT)
specs->typespec_word = cts_fract;
else
specs->typespec_word = cts_accum;
specs->locations[cdw_typespec] = loc;
}
if (!targetm.fixed_point_supported_p ())
error_at (loc,
"fixed-point types not supported for this target");
pedwarn (loc, OPT_Wpedantic,
"ISO C does not support fixed-point types");
return specs;
default:
/* ObjC reserved word "id", handled below. */
break;
}
}
}
/* Now we have a typedef (a TYPE_DECL node), an identifier (some
form of ObjC type, cases such as "int" and "long" being handled
above), a TYPE (struct, union, enum and typeof specifiers) or an
ERROR_MARK. In none of these cases may there have previously
been any type specifiers. */
if (specs->type || specs->typespec_word != cts_none
|| specs->long_p || specs->short_p || specs->signed_p
|| specs->unsigned_p || specs->complex_p)
error_at (loc, "two or more data types in declaration specifiers");
else if (TREE_CODE (type) == TYPE_DECL)
{
if (TREE_TYPE (type) == error_mark_node)
; /* Allow the type to default to int to avoid cascading errors. */
else
{
specs->type = TREE_TYPE (type);
specs->decl_attr = DECL_ATTRIBUTES (type);
specs->typedef_p = true;
specs->explicit_signed_p = C_TYPEDEF_EXPLICITLY_SIGNED (type);
specs->locations[cdw_typedef] = loc;
/* If this typedef name is defined in a struct, then a C++
lookup would return a different value. */
if (warn_cxx_compat
&& I_SYMBOL_BINDING (DECL_NAME (type))->in_struct)
warning_at (loc, OPT_Wc___compat,
"C++ lookup of %qD would return a field, not a type",
type);
/* If we are parsing a struct, record that a struct field
used a typedef. */
if (warn_cxx_compat && struct_parse_info != NULL)
struct_parse_info->typedefs_seen.safe_push (type);
}
}
else if (TREE_CODE (type) == IDENTIFIER_NODE)
{
tree t = lookup_name (type);
if (!t || TREE_CODE (t) != TYPE_DECL)
error_at (loc, "%qE fails to be a typedef or built in type", type);
else if (TREE_TYPE (t) == error_mark_node)
;
else
{
specs->type = TREE_TYPE (t);
specs->locations[cdw_typespec] = loc;
}
}
else
{
if (TREE_CODE (type) != ERROR_MARK && spec.kind == ctsk_typeof)
{
specs->typedef_p = true;
specs->locations[cdw_typedef] = loc;
if (spec.expr)
{
if (specs->expr)
specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (spec.expr),
specs->expr, spec.expr);
else
specs->expr = spec.expr;
specs->expr_const_operands &= spec.expr_const_operands;
}
}
specs->type = type;
}
return specs;
}
/* Add the storage class specifier or function specifier SCSPEC to the
declaration specifiers SPECS, returning SPECS. */
struct c_declspecs *
declspecs_add_scspec (source_location loc,
struct c_declspecs *specs,
tree scspec)
{
enum rid i;
enum c_storage_class n = csc_none;
bool dupe = false;
specs->declspecs_seen_p = true;
gcc_assert (TREE_CODE (scspec) == IDENTIFIER_NODE
&& C_IS_RESERVED_WORD (scspec));
i = C_RID_CODE (scspec);
if (specs->non_sc_seen_p)
warning (OPT_Wold_style_declaration,
"%qE is not at beginning of declaration", scspec);
switch (i)
{
case RID_INLINE:
/* C99 permits duplicate inline. Although of doubtful utility,
it seems simplest to permit it in gnu89 mode as well, as
there is also little utility in maintaining this as a
difference between gnu89 and C99 inline. */
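/* e.g. (hypothetical user code) "inline inline int f (void);" is
accepted here without a diagnostic. */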
dupe = false;
specs->inline_p = true;
specs->locations[cdw_inline] = loc;
break;
case RID_NORETURN:
/* Duplicate _Noreturn is permitted. */
dupe = false;
specs->noreturn_p = true;
specs->locations[cdw_noreturn] = loc;
break;
case RID_THREAD:
dupe = specs->thread_p;
if (specs->storage_class == csc_auto)
error ("%qE used with %<auto%>", scspec);
else if (specs->storage_class == csc_register)
error ("%qE used with %<register%>", scspec);
else if (specs->storage_class == csc_typedef)
error ("%qE used with %<typedef%>", scspec);
else
{
specs->thread_p = true;
specs->thread_gnu_p = (strcmp (IDENTIFIER_POINTER (scspec),
"__thread") == 0);
/* A diagnostic is not required for the use of this
identifier in the implementation namespace; only diagnose
it for the C11 spelling because of existing code using
the other spelling. */
if (!specs->thread_gnu_p)
{
if (flag_isoc99)
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C99 does not support %qE", scspec);
else
pedwarn_c99 (loc, OPT_Wpedantic,
"ISO C90 does not support %qE", scspec);
}
specs->locations[cdw_thread] = loc;
}
break;
case RID_AUTO:
n = csc_auto;
break;
case RID_EXTERN:
n = csc_extern;
/* Diagnose "__thread extern". */
if (specs->thread_p && specs->thread_gnu_p)
error ("%<__thread%> before %<extern%>");
break;
case RID_REGISTER:
n = csc_register;
break;
case RID_STATIC:
n = csc_static;
/* Diagnose "__thread static". */
if (specs->thread_p && specs->thread_gnu_p)
error ("%<__thread%> before %<static%>");
break;
case RID_TYPEDEF:
n = csc_typedef;
break;
default:
gcc_unreachable ();
}
if (n != csc_none && n == specs->storage_class)
dupe = true;
if (dupe)
{
if (i == RID_THREAD)
error ("duplicate %<_Thread_local%> or %<__thread%>");
else
error ("duplicate %qE", scspec);
}
if (n != csc_none)
{
if (specs->storage_class != csc_none && n != specs->storage_class)
{
error ("multiple storage classes in declaration specifiers");
}
else
{
specs->storage_class = n;
specs->locations[cdw_storage_class] = loc;
if (n != csc_extern && n != csc_static && specs->thread_p)
{
error ("%qs used with %qE",
specs->thread_gnu_p ? "__thread" : "_Thread_local",
scspec);
specs->thread_p = false;
}
}
}
return specs;
}
/* Add the attributes ATTRS to the declaration specifiers SPECS,
returning SPECS. */
struct c_declspecs *
declspecs_add_attrs (source_location loc, struct c_declspecs *specs, tree attrs)
{
specs->attrs = chainon (attrs, specs->attrs);
specs->locations[cdw_attributes] = loc;
specs->declspecs_seen_p = true;
return specs;
}
/* Add an _Alignas specifier (expression ALIGN, or type whose
alignment is ALIGN) to the declaration specifiers SPECS, returning
SPECS. */
struct c_declspecs *
declspecs_add_alignas (source_location loc,
struct c_declspecs *specs, tree align)
{
int align_log;
specs->alignas_p = true;
specs->locations[cdw_alignas] = loc;
if (align == error_mark_node)
return specs;
align_log = check_user_alignment (align, true);
if (align_log > specs->align_log)
specs->align_log = align_log;
return specs;
}
/* Combine "long", "short", "signed", "unsigned" and "_Complex" type
specifiers with any other type specifier to determine the resulting
type. This is where ISO C checks on complex types are made, since
"_Complex long" is a prefix of the valid ISO C type "_Complex long
double". */
struct c_declspecs *
finish_declspecs (struct c_declspecs *specs)
{
/* If a type was specified as a whole, we have no modifiers and are
done. */
if (specs->type != NULL_TREE)
{
gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
/* Set a dummy type. */
if (TREE_CODE (specs->type) == ERROR_MARK)
specs->type = integer_type_node;
return specs;
}
/* If none of "void", "_Bool", "char", "int", "float" or "double"
has been specified, treat it as "int" unless "_Complex" is
present and there are no other specifiers. If we just have
"_Complex", it is equivalent to "_Complex double", but e.g.
"_Complex short" is equivalent to "_Complex short int". */
if (specs->typespec_word == cts_none)
{
if (specs->saturating_p)
{
error_at (specs->locations[cdw_saturating],
"%<_Sat%> is used without %<_Fract%> or %<_Accum%>");
if (!targetm.fixed_point_supported_p ())
error_at (specs->locations[cdw_saturating],
"fixed-point types not supported for this target");
specs->typespec_word = cts_fract;
}
else if (specs->long_p || specs->short_p
|| specs->signed_p || specs->unsigned_p)
{
specs->typespec_word = cts_int;
}
else if (specs->complex_p)
{
specs->typespec_word = cts_double;
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support plain %<complex%> meaning "
"%<double complex%>");
}
else
{
specs->typespec_word = cts_int;
specs->default_int_p = true;
/* We don't diagnose this here because grokdeclarator will
give more specific diagnostics according to whether it is
a function definition. */
}
}
/* If "signed" was specified, record this to distinguish "int" and
"signed int" in the case of a bit-field with
-funsigned-bitfields. */
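/* e.g. (hypothetical user code) with -funsigned-bitfields:
struct s { int a : 3; signed int b : 3; };
the field "a" becomes unsigned while "b" stays signed, which is why
plain "int" and "signed int" must be distinguished here. */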
specs->explicit_signed_p = specs->signed_p;
/* Now compute the actual type. */
switch (specs->typespec_word)
{
case cts_auto_type:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
/* Type to be filled in later. */
break;
case cts_void:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
specs->type = void_type_node;
break;
case cts_bool:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p
&& !specs->complex_p);
specs->type = boolean_type_node;
break;
case cts_char:
gcc_assert (!specs->long_p && !specs->short_p);
gcc_assert (!(specs->signed_p && specs->unsigned_p));
if (specs->signed_p)
specs->type = signed_char_type_node;
else if (specs->unsigned_p)
specs->type = unsigned_char_type_node;
else
specs->type = char_type_node;
if (specs->complex_p)
{
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support complex integer types");
specs->type = build_complex_type (specs->type);
}
break;
case cts_int_n:
gcc_assert (!specs->long_p && !specs->short_p && !specs->long_long_p);
gcc_assert (!(specs->signed_p && specs->unsigned_p));
specs->type = (specs->unsigned_p
? int_n_trees[specs->int_n_idx].unsigned_type
: int_n_trees[specs->int_n_idx].signed_type);
if (specs->complex_p)
{
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support complex integer types");
specs->type = build_complex_type (specs->type);
}
break;
case cts_int:
gcc_assert (!(specs->long_p && specs->short_p));
gcc_assert (!(specs->signed_p && specs->unsigned_p));
if (specs->long_long_p)
specs->type = (specs->unsigned_p
? long_long_unsigned_type_node
: long_long_integer_type_node);
else if (specs->long_p)
specs->type = (specs->unsigned_p
? long_unsigned_type_node
: long_integer_type_node);
else if (specs->short_p)
specs->type = (specs->unsigned_p
? short_unsigned_type_node
: short_integer_type_node);
else
specs->type = (specs->unsigned_p
? unsigned_type_node
: integer_type_node);
if (specs->complex_p)
{
pedwarn (specs->locations[cdw_complex], OPT_Wpedantic,
"ISO C does not support complex integer types");
specs->type = build_complex_type (specs->type);
}
break;
case cts_float:
gcc_assert (!specs->long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p);
specs->type = (specs->complex_p
? complex_float_type_node
: float_type_node);
break;
case cts_double:
gcc_assert (!specs->long_long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p);
if (specs->long_p)
{
specs->type = (specs->complex_p
? complex_long_double_type_node
: long_double_type_node);
}
else
{
specs->type = (specs->complex_p
? complex_double_type_node
: double_type_node);
}
break;
case cts_dfloat32:
case cts_dfloat64:
case cts_dfloat128:
gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p
&& !specs->signed_p && !specs->unsigned_p && !specs->complex_p);
if (specs->typespec_word == cts_dfloat32)
specs->type = dfloat32_type_node;
else if (specs->typespec_word == cts_dfloat64)
specs->type = dfloat64_type_node;
else
specs->type = dfloat128_type_node;
break;
case cts_fract:
gcc_assert (!specs->complex_p);
if (!targetm.fixed_point_supported_p ())
specs->type = integer_type_node;
else if (specs->saturating_p)
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_long_fract_type_node
: sat_long_long_fract_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_fract_type_node
: sat_long_fract_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? sat_unsigned_short_fract_type_node
: sat_short_fract_type_node;
else
specs->type = specs->unsigned_p
? sat_unsigned_fract_type_node
: sat_fract_type_node;
}
else
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? unsigned_long_long_fract_type_node
: long_long_fract_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? unsigned_long_fract_type_node
: long_fract_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? unsigned_short_fract_type_node
: short_fract_type_node;
else
specs->type = specs->unsigned_p
? unsigned_fract_type_node
: fract_type_node;
}
break;
case cts_accum:
gcc_assert (!specs->complex_p);
if (!targetm.fixed_point_supported_p ())
specs->type = integer_type_node;
else if (specs->saturating_p)
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_long_accum_type_node
: sat_long_long_accum_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? sat_unsigned_long_accum_type_node
: sat_long_accum_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? sat_unsigned_short_accum_type_node
: sat_short_accum_type_node;
else
specs->type = specs->unsigned_p
? sat_unsigned_accum_type_node
: sat_accum_type_node;
}
else
{
if (specs->long_long_p)
specs->type = specs->unsigned_p
? unsigned_long_long_accum_type_node
: long_long_accum_type_node;
else if (specs->long_p)
specs->type = specs->unsigned_p
? unsigned_long_accum_type_node
: long_accum_type_node;
else if (specs->short_p)
specs->type = specs->unsigned_p
? unsigned_short_accum_type_node
: short_accum_type_node;
else
specs->type = specs->unsigned_p
? unsigned_accum_type_node
: accum_type_node;
}
break;
default:
gcc_unreachable ();
}
return specs;
}
/* A subroutine of c_write_global_declarations. Perform final processing
on one file scope's declarations (or the external scope's declarations),
GLOBALS. */
static void
c_write_global_declarations_1 (tree globals)
{
tree decl;
bool reconsider;
/* Process the decls in the order they were written. */
for (decl = globals; decl; decl = DECL_CHAIN (decl))
{
/* Check for used but undefined static functions using the C
standard's definition of "used", and set TREE_NO_WARNING so
that check_global_declarations doesn't repeat the check. */
if (TREE_CODE (decl) == FUNCTION_DECL
&& DECL_INITIAL (decl) == 0
&& DECL_EXTERNAL (decl)
&& !TREE_PUBLIC (decl)
&& C_DECL_USED (decl))
{
pedwarn (input_location, 0, "%q+F used but never defined", decl);
TREE_NO_WARNING (decl) = 1;
}
wrapup_global_declaration_1 (decl);
}
do
{
reconsider = false;
for (decl = globals; decl; decl = DECL_CHAIN (decl))
reconsider |= wrapup_global_declaration_2 (decl);
}
while (reconsider);
for (decl = globals; decl; decl = DECL_CHAIN (decl))
check_global_declaration_1 (decl);
}
/* A subroutine of c_write_global_declarations. Emit debug information for each
of the declarations in GLOBALS. */
static void
c_write_global_declarations_2 (tree globals)
{
tree decl;
for (decl = globals; decl ; decl = DECL_CHAIN (decl))
debug_hooks->global_decl (decl);
}
/* Callback to collect a source_ref from a DECL. */
static void
collect_source_ref_cb (tree decl)
{
if (!DECL_IS_BUILTIN (decl))
collect_source_ref (LOCATION_FILE (decl_sloc (decl, false)));
}
/* Preserve the external declarations scope across a garbage collect. */
static GTY(()) tree ext_block;
/* Collect all references relevant to SOURCE_FILE. */
static void
collect_all_refs (const char *source_file)
{
tree t;
unsigned i;
FOR_EACH_VEC_ELT (*all_translation_units, i, t)
collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (t)), source_file);
collect_ada_nodes (BLOCK_VARS (ext_block), source_file);
}
/* Iterate over all global declarations and call CALLBACK. */
static void
for_each_global_decl (void (*callback) (tree decl))
{
tree t;
tree decls;
tree decl;
unsigned i;
FOR_EACH_VEC_ELT (*all_translation_units, i, t)
{
decls = DECL_INITIAL (t);
for (decl = BLOCK_VARS (decls); decl; decl = TREE_CHAIN (decl))
callback (decl);
}
for (decl = BLOCK_VARS (ext_block); decl; decl = TREE_CHAIN (decl))
callback (decl);
}
void
c_write_global_declarations (void)
{
tree t;
unsigned i;
/* We don't want to do this if generating a PCH. */
if (pch_file)
return;
timevar_start (TV_PHASE_DEFERRED);
/* Do the Objective-C stuff. This is where all the Objective-C
module stuff gets generated (symtab, class/protocol/selector
lists etc). */
if (c_dialect_objc ())
objc_write_global_declarations ();
/* Close the external scope. */
ext_block = pop_scope ();
external_scope = 0;
gcc_assert (!current_scope);
/* Handle -fdump-ada-spec[-slim]. */
if (flag_dump_ada_spec || flag_dump_ada_spec_slim)
{
/* Build a table of files to generate specs for. */
if (flag_dump_ada_spec_slim)
collect_source_ref (main_input_filename);
else
for_each_global_decl (collect_source_ref_cb);
dump_ada_specs (collect_all_refs, NULL);
}
if (ext_block)
{
tree tmp = BLOCK_VARS (ext_block);
int flags;
FILE * stream = dump_begin (TDI_tu, &flags);
if (stream && tmp)
{
dump_node (tmp, flags & ~TDF_SLIM, stream);
dump_end (TDI_tu, stream);
}
}
/* Process all file scopes in this compilation, and the external_scope,
through wrapup_global_declarations and check_global_declarations. */
FOR_EACH_VEC_ELT (*all_translation_units, i, t)
c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t)));
c_write_global_declarations_1 (BLOCK_VARS (ext_block));
timevar_stop (TV_PHASE_DEFERRED);
timevar_start (TV_PHASE_OPT_GEN);
/* We're done parsing; proceed to optimize and emit assembly.
FIXME: shouldn't be the front end's responsibility to call this. */
symtab->finalize_compilation_unit ();
timevar_stop (TV_PHASE_OPT_GEN);
timevar_start (TV_PHASE_DBGINFO);
/* After cgraph has had a chance to emit everything that's going to
be emitted, output debug information for globals. */
if (!seen_error ())
{
timevar_push (TV_SYMOUT);
FOR_EACH_VEC_ELT (*all_translation_units, i, t)
c_write_global_declarations_2 (BLOCK_VARS (DECL_INITIAL (t)));
c_write_global_declarations_2 (BLOCK_VARS (ext_block));
timevar_pop (TV_SYMOUT);
}
ext_block = NULL;
timevar_stop (TV_PHASE_DBGINFO);
}
/* Register reserved keyword WORD as qualifier for address space AS. */
void
c_register_addr_space (const char *word, addr_space_t as)
{
int rid = RID_FIRST_ADDR_SPACE + as;
tree id;
/* Address space qualifiers are only supported
in C with GNU extensions enabled. */
if (c_dialect_objc () || flag_no_asm)
return;
id = get_identifier (word);
C_SET_RID_CODE (id, rid);
C_IS_RESERVED_WORD (id) = 1;
ridpointers [rid] = id;
}
/* Return identifier to look up for omp declare reduction. */
tree
c_omp_reduction_id (enum tree_code reduction_code, tree reduction_id)
{
const char *p = NULL;
switch (reduction_code)
{
case PLUS_EXPR: p = "+"; break;
case MULT_EXPR: p = "*"; break;
case MINUS_EXPR: p = "-"; break;
case BIT_AND_EXPR: p = "&"; break;
case BIT_XOR_EXPR: p = "^"; break;
case BIT_IOR_EXPR: p = "|"; break;
case TRUTH_ANDIF_EXPR: p = "&&"; break;
case TRUTH_ORIF_EXPR: p = "||"; break;
case MIN_EXPR: p = "min"; break;
case MAX_EXPR: p = "max"; break;
default:
break;
}
if (p == NULL)
{
if (TREE_CODE (reduction_id) != IDENTIFIER_NODE)
return error_mark_node;
p = IDENTIFIER_POINTER (reduction_id);
}
const char prefix[] = "omp declare reduction ";
size_t lenp = sizeof (prefix);
size_t len = strlen (p);
char *name = XALLOCAVEC (char, lenp + len);
memcpy (name, prefix, lenp - 1);
memcpy (name + lenp - 1, p, len + 1);
return get_identifier (name);
}
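/* For example, c_omp_reduction_id (PLUS_EXPR, NULL_TREE) returns the
identifier "omp declare reduction +", under which a directive such as
(hypothetical user code)
#pragma omp declare reduction (+ : struct s : omp_out.x += omp_in.x)
is bound and later looked up. */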
/* Lookup REDUCTION_ID in the current scope, or create an artificial
VAR_DECL, bind it into the current scope and return it. */
tree
c_omp_reduction_decl (tree reduction_id)
{
struct c_binding *b = I_SYMBOL_BINDING (reduction_id);
if (b != NULL && B_IN_CURRENT_SCOPE (b))
return b->decl;
tree decl = build_decl (BUILTINS_LOCATION, VAR_DECL,
reduction_id, integer_type_node);
DECL_ARTIFICIAL (decl) = 1;
DECL_EXTERNAL (decl) = 1;
TREE_STATIC (decl) = 1;
TREE_PUBLIC (decl) = 0;
bind (reduction_id, decl, current_scope, true, false, BUILTINS_LOCATION);
return decl;
}
/* Lookup REDUCTION_ID in the first scope where it has entry for TYPE. */
tree
c_omp_reduction_lookup (tree reduction_id, tree type)
{
struct c_binding *b = I_SYMBOL_BINDING (reduction_id);
while (b)
{
tree t;
for (t = DECL_INITIAL (b->decl); t; t = TREE_CHAIN (t))
if (comptypes (TREE_PURPOSE (t), type))
return TREE_VALUE (t);
b = b->shadowed;
}
return error_mark_node;
}
/* Helper function called via walk_tree, to diagnose invalid
#pragma omp declare reduction combiners or initializers. */
tree
c_check_omp_declare_reduction_r (tree *tp, int *, void *data)
{
tree *vars = (tree *) data;
if (SSA_VAR_P (*tp)
&& !DECL_ARTIFICIAL (*tp)
&& *tp != vars[0]
&& *tp != vars[1])
{
location_t loc = DECL_SOURCE_LOCATION (vars[0]);
if (strcmp (IDENTIFIER_POINTER (DECL_NAME (vars[0])), "omp_out") == 0)
error_at (loc, "%<#pragma omp declare reduction%> combiner refers to "
"variable %qD which is not %<omp_out%> nor %<omp_in%>",
*tp);
else
error_at (loc, "%<#pragma omp declare reduction%> initializer refers "
"to variable %qD which is not %<omp_priv%> nor "
"%<omp_orig%>",
*tp);
return *tp;
}
return NULL_TREE;
}
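/* e.g. (hypothetical user code) the combiner below is rejected because
it refers to "tmp", which is neither omp_out nor omp_in:
int tmp;
#pragma omp declare reduction (r : int : omp_out += omp_in + tmp) */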
#include "gt-c-c-decl.h"
|
residual_based_bdf_displacement_scheme.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_BDF_DISPLACEMENT_SCHEME)
#define KRATOS_RESIDUAL_BASED_BDF_DISPLACEMENT_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/schemes/residual_based_bdf_scheme.h"
#include "includes/variables.h"
#include "includes/checks.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
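// Minimal usage sketch (illustrative only; the space typedefs below are the
// usual Kratos ublas aliases and are assumptions, not part of this header):
//   using SparseSpaceType = UblasSpace<double, CompressedMatrix, Vector>;
//   using LocalSpaceType = UblasSpace<double, Matrix, Vector>;
//   using SchemeType = ResidualBasedBDFDisplacementScheme<SparseSpaceType, LocalSpaceType>;
//   SchemeType::Pointer p_scheme = Kratos::make_shared<SchemeType>(2); // second order BDF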
/**
* @class ResidualBasedBDFDisplacementScheme
* @ingroup KratosCore
* @brief BDF integration scheme (displacement based)
* @details The \f$ n \f$ order Backward Differentiation Formula (BDF) method is an \f$ n \f$ step, \f$ n \f$ order accurate method.
* Look at the base class for more details
* @see ResidualBasedBDFScheme
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class ResidualBasedBDFDisplacementScheme
: public ResidualBasedBDFScheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ResidualBasedBDFDisplacementScheme
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFDisplacementScheme );
/// Base class definition
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType;
typedef ResidualBasedBDFScheme<TSparseSpace,TDenseSpace> BDFBaseType;
typedef ResidualBasedBDFDisplacementScheme<TSparseSpace, TDenseSpace> ClassType;
/// Data type definition
typedef typename BDFBaseType::TDataType TDataType;
/// Matrix type definition
typedef typename BDFBaseType::TSystemMatrixType TSystemMatrixType;
/// Vector type definition
typedef typename BDFBaseType::TSystemVectorType TSystemVectorType;
/// Local system vector type definition
typedef typename BDFBaseType::LocalSystemVectorType LocalSystemVectorType;
/// Local system matrix type definition
typedef typename BDFBaseType::LocalSystemMatrixType LocalSystemMatrixType;
/// DoF array type definition
typedef typename BDFBaseType::DofsArrayType DofsArrayType;
/// DoF vector type definition
typedef typename Element::DofsVectorType DofsVectorType;
/// Nodes containers definition
typedef ModelPart::NodesContainerType NodesArrayType;
/// Elements containers definition
typedef ModelPart::ElementsContainerType ElementsArrayType;
/// Conditions containers definition
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef double ComponentType;
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor. The BDF method (parameters)
* @param ThisParameters Parameters with the integration order
*/
explicit ResidualBasedBDFDisplacementScheme(Parameters ThisParameters)
: BDFBaseType()
{
// Validate and assign defaults
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
}
/**
* @brief Constructor. The BDF method
* @param Order The integration order
* @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and its derivatives
*/
explicit ResidualBasedBDFDisplacementScheme(const std::size_t Order = 2)
:BDFBaseType(Order)
{
}
/** Copy Constructor.
*/
explicit ResidualBasedBDFDisplacementScheme(ResidualBasedBDFDisplacementScheme& rOther)
:BDFBaseType(rOther)
{
}
/**
* Clone
*/
typename BaseType::Pointer Clone() override
{
return Kratos::make_shared<ResidualBasedBDFDisplacementScheme>(*this);
}
/** Destructor.
*/
~ResidualBasedBDFDisplacementScheme() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Create method
* @param ThisParameters The configuration parameters
*/
typename BaseType::Pointer Create(Parameters ThisParameters) const override
{
return Kratos::make_shared<ClassType>(ThisParameters);
}
/**
* @brief It initializes the solution step. Only needed when the time step solution is restarted
* @param rModelPart The model part of the problem to solve
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
// The current process info
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
BDFBaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
// Updating time derivatives (nodally for efficiency)
const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
const auto it_node_begin = rModelPart.Nodes().begin();
// Getting dimension
KRATOS_WARNING_IF("ResidualBasedBDFDisplacementScheme", !r_current_process_info.Has(DOMAIN_SIZE)) << "DOMAIN_SIZE not defined. Please define DOMAIN_SIZE. 3D case will be assumed" << std::endl;
const std::size_t dimension = r_current_process_info.Has(DOMAIN_SIZE) ? r_current_process_info.GetValue(DOMAIN_SIZE) : 3;
// Getting position
const int velpos = it_node_begin->HasDofFor(VELOCITY_X) ? it_node_begin->GetDofPosition(VELOCITY_X) : -1;
const int accelpos = it_node_begin->HasDofFor(ACCELERATION_X) ? it_node_begin->GetDofPosition(ACCELERATION_X) : -1;
std::array<bool, 3> fixed = {false, false, false};
const std::array<const Variable<ComponentType>*, 3> disp_components = {&DISPLACEMENT_X, &DISPLACEMENT_Y, &DISPLACEMENT_Z};
const std::array<const Variable<ComponentType>*, 3> vel_components = {&VELOCITY_X, &VELOCITY_Y, &VELOCITY_Z};
const std::array<const Variable<ComponentType>*, 3> accel_components = {&ACCELERATION_X, &ACCELERATION_Y, &ACCELERATION_Z};
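// If a velocity or acceleration component is fixed on a node, the
// corresponding displacement component is fixed as well, since the
// displacement is the primary unknown integrated by this scheme.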
#pragma omp parallel for private(fixed)
for(int i = 0; i < num_nodes; ++i) {
auto it_node = it_node_begin + i;
for (std::size_t i_dim = 0; i_dim < dimension; ++i_dim)
fixed[i_dim] = false;
if (accelpos > -1) {
for (std::size_t i_dim = 0; i_dim < dimension; ++i_dim) {
if (it_node->GetDof(*accel_components[i_dim], accelpos + i_dim).IsFixed()) {
it_node->Fix(*disp_components[i_dim]);
fixed[i_dim] = true;
}
}
}
if (velpos > -1) {
for (std::size_t i_dim = 0; i_dim < dimension; ++i_dim) {
if (it_node->GetDof(*vel_components[i_dim], velpos + i_dim).IsFixed() && !fixed[i_dim]) {
it_node->Fix(*disp_components[i_dim]);
}
}
}
}
KRATOS_CATCH("ResidualBasedBDFDisplacementScheme.InitializeSolutionStep");
}
/**
* @brief Performing the prediction of the solution
* @details It predicts the solution for the current step as x = xold + vold * Dt + 0.5 * aold * Dt^2 for free DOFs; fixed velocity/acceleration DOFs are back-substituted through the BDF formula instead
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param A LHS matrix
* @param Dx Incremental update of primary variables
* @param b RHS Vector
*/
void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
) override
{
KRATOS_TRY;
// The current process info
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
const double delta_time = r_current_process_info[DELTA_TIME];
// Updating time derivatives (nodally for efficiency)
const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
const auto it_node_begin = rModelPart.Nodes().begin();
// Getting position
KRATOS_ERROR_IF_NOT(it_node_begin->HasDofFor(DISPLACEMENT_X)) << "ResidualBasedBDFDisplacementScheme:: DISPLACEMENT is not added" << std::endl;
const int disppos = it_node_begin->GetDofPosition(DISPLACEMENT_X);
const int velpos = it_node_begin->HasDofFor(VELOCITY_X) ? it_node_begin->GetDofPosition(VELOCITY_X) : -1;
const int accelpos = it_node_begin->HasDofFor(ACCELERATION_X) ? it_node_begin->GetDofPosition(ACCELERATION_X) : -1;
// Getting dimension
KRATOS_WARNING_IF("ResidualBasedBDFDisplacementScheme", !r_current_process_info.Has(DOMAIN_SIZE)) << "DOMAIN_SIZE not defined. Please define DOMAIN_SIZE. 3D case will be assumed" << std::endl;
const std::size_t dimension = r_current_process_info.Has(DOMAIN_SIZE) ? r_current_process_info.GetValue(DOMAIN_SIZE) : 3;
// Auxiliary variables
std::array<bool, 3> predicted = {false, false, false};
const std::array<const Variable<ComponentType>*, 3> disp_components = {&DISPLACEMENT_X, &DISPLACEMENT_Y, &DISPLACEMENT_Z};
const std::array<const Variable<ComponentType>*, 3> vel_components = {&VELOCITY_X, &VELOCITY_Y, &VELOCITY_Z};
const std::array<const Variable<ComponentType>*, 3> accel_components = {&ACCELERATION_X, &ACCELERATION_Y, &ACCELERATION_Z};
#pragma omp parallel for private(predicted)
for(int i = 0; i< num_nodes; ++i) {
auto it_node = it_node_begin + i;
for (std::size_t i_dim = 0; i_dim < dimension; ++i_dim)
predicted[i_dim] = false;
const array_1d<double, 3>& dot2un1 = it_node->FastGetSolutionStepValue(ACCELERATION, 1);
const array_1d<double, 3>& dotun1 = it_node->FastGetSolutionStepValue(VELOCITY, 1);
const array_1d<double, 3>& un1 = it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
const array_1d<double, 3>& dot2un0 = it_node->FastGetSolutionStepValue(ACCELERATION);
array_1d<double, 3>& dotun0 = it_node->FastGetSolutionStepValue(VELOCITY);
array_1d<double, 3>& un0 = it_node->FastGetSolutionStepValue(DISPLACEMENT);
if (accelpos > -1) {
for (std::size_t i_dim = 0; i_dim < dimension; ++i_dim) {
if (it_node->GetDof(*accel_components[i_dim], accelpos + i_dim).IsFixed()) {
dotun0[i_dim] = dot2un0[i_dim];
for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
dotun0[i_dim] -= BDFBaseType::mBDF[i_order] * it_node->FastGetSolutionStepValue(*vel_components[i_dim], i_order);
dotun0[i_dim] /= BDFBaseType::mBDF[0]; // leading BDF coefficient (indexed by order, not by dimension)
un0[i_dim] = dotun0[i_dim];
for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
un0[i_dim] -= BDFBaseType::mBDF[i_order] * it_node->FastGetSolutionStepValue(*disp_components[i_dim], i_order);
un0[i_dim] /= BDFBaseType::mBDF[0]; // leading BDF coefficient
predicted[i_dim] = true;
}
}
}
if (velpos > -1) {
for (std::size_t i_dim = 0; i_dim < dimension; ++i_dim) {
if (it_node->GetDof(*vel_components[i_dim], velpos + i_dim).IsFixed() && !predicted[i_dim]) {
un0[i_dim] = dotun0[i_dim];
for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
un0[i_dim] -= BDFBaseType::mBDF[i_order] * it_node->FastGetSolutionStepValue(*disp_components[i_dim], i_order);
un0[i_dim] /= BDFBaseType::mBDF[0]; // leading BDF coefficient
predicted[i_dim] = true;
}
}
}
for (std::size_t i_dim = 0; i_dim < dimension; ++i_dim) {
if (!it_node->GetDof(*disp_components[i_dim], disppos + i_dim).IsFixed() && !predicted[i_dim]) {
un0[i_dim] = un1[i_dim] + delta_time * dotun1[i_dim] + 0.5 * std::pow(delta_time, 2) * dot2un1[i_dim];
}
}
// Updating time derivatives
UpdateFirstDerivative(it_node);
UpdateSecondDerivative(it_node);
}
KRATOS_CATCH( "" );
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided.
* @details Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model of the problem to solve
* @return Zero means all ok
*/
int Check(const ModelPart& rModelPart) const override
{
KRATOS_TRY;
const int err = BDFBaseType::Check(rModelPart);
if(err!=0) return err;
// Check for variables keys
// Verify that the variables are correctly initialized
KRATOS_CHECK_VARIABLE_KEY(DISPLACEMENT)
KRATOS_CHECK_VARIABLE_KEY(VELOCITY)
KRATOS_CHECK_VARIABLE_KEY(ACCELERATION)
// Check that variables are correctly allocated
for(auto& rnode : rModelPart.Nodes()) {
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DISPLACEMENT,rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(VELOCITY,rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(ACCELERATION,rnode)
KRATOS_CHECK_DOF_IN_NODE(DISPLACEMENT_X, rnode)
KRATOS_CHECK_DOF_IN_NODE(DISPLACEMENT_Y, rnode)
KRATOS_CHECK_DOF_IN_NODE(DISPLACEMENT_Z, rnode)
}
KRATOS_CATCH( "" );
return 0;
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "bdf_displacement_scheme",
"integration_order" : 2
})");
// Getting base class default parameters
const Parameters base_default_parameters = BDFBaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
return "bdf_displacement_scheme";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedBDFDisplacementScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Updating first time derivative (velocity)
* @param itNode The node iterator
*/
inline void UpdateFirstDerivative(NodesArrayType::iterator itNode) override
{
array_1d<double, 3>& dotun0 = itNode->FastGetSolutionStepValue(VELOCITY);
noalias(dotun0) = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(DISPLACEMENT);
for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
noalias(dotun0) += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(DISPLACEMENT, i_order);
}
/**
* @brief Updating second time derivative (acceleration)
* @param itNode The node iterator
*/
inline void UpdateSecondDerivative(NodesArrayType::iterator itNode) override
{
array_1d<double, 3>& dot2un0 = itNode->FastGetSolutionStepValue(ACCELERATION);
noalias(dot2un0) = BDFBaseType::mBDF[0] * itNode->FastGetSolutionStepValue(VELOCITY);
for (std::size_t i_order = 1; i_order < BDFBaseType::mOrder + 1; ++i_order)
noalias(dot2un0) += BDFBaseType::mBDF[i_order] * itNode->FastGetSolutionStepValue(VELOCITY, i_order);
}
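// Note: both updates above evaluate the BDF formula
//   d/dt x_n = mBDF[0] * x_n + sum_{i=1..order} mBDF[i] * x_{n-i},
// with x = DISPLACEMENT for the velocity and x = VELOCITY for the
// acceleration; the mBDF coefficients are provided by the base class.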
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Serialization
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBDFDisplacementScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BDF_DISPLACEMENT_SCHEME defined */
|
EllipseDetection.h | #ifndef ELLIPSEDETECTION_H
#define ELLIPSEDETECTION_H
#include "EllipseEstimator.h"
#include "../ImageChannel.h"
#include "../segmentation/Segmentation.h"
#include "../filter/Clean.h"
#include "../filter/Gauss.h"
#include "../filter/Normalize.h"
#include "../filter/Dilate.h"
#include "../ImageFactory.h"
#include "../OpenCV.h"
#include <omp.h>
namespace K {
class EllipseDetection {
private:
K::EllipseEstimator::RANSACPixel ransac;
bool combineSimilar = false;
float blurSigma = 1.75f;
public:
EllipseDetection() {
// defaults
ransac.setMinCoverage(0.70f);
ransac.setRatioConstraint(1.0f, 1.5f);
ransac.setSizeConstraint(30, 150); // ransac.setSizeConstraint(15.0f, 180.0f);
ransac.setThreshold(0.28f); // minimal pixel brightness for accepting
ransac.setNumSamples(12); // number of samples for SVD
ransac.setNumRuns(15); // number of RANSAC runs
}
K::EllipseEstimator::RANSACPixel& getRANSAC() {
return ransac;
}
/** whether to combine similar ellipses into one */
void setCombineSimilar(const bool combine) {
this->combineSimilar = combine;
}
/** set the sigma to use for gaussian blur before performing ellipse estimation based on blurred pixels */
void setBlurSigma(const float sigma) {
this->blurSigma = sigma;
}
/** perform ellipse-detection on a given black/white image with 1-pixel-wide, white edges */
std::vector<K::Ellipse::GeometricParams> getFromEdgeImage(const K::ImageChannel& imgEdges) {
const std::vector<std::vector<K::Point2i>> allSegments = getSegments(imgEdges);
// TODO
//const std::vector<std::vector<K::Point2i>> splitSegments = getSegmentsSplitFix(allSegments, 200); //debug(splitSegments, imgEdges);
//const std::vector<std::vector<K::Point2i>> splitSegments = getSegmentsSplitVar(allSegments, 75, 450);
const std::vector<std::vector<K::Point2i>> splitSegments = getSegmentsSplitUnsplit(allSegments, 100, 1);
//const std::vector<std::vector<K::Point2i>> splitSegments = allSegments;
const K::ImageChannel imgEdgesBlur = getBlurred(imgEdges);
std::vector<K::Ellipse::GeometricParams> ellipses;
//#pragma omp parallel for
for (size_t i = 0; i < splitSegments.size(); ++i) {
// current segment
const std::vector<K::Point2i>& set = splitSegments[i];
// perform detection for the current segment
K::EllipseEstimator::RANSACPixel::MatchStats stats;
K::Ellipse::CanonicalParams canon = ransac.get(set, imgEdgesBlur, stats);
//if (canon.F < 0) {continue;}
K::Ellipse::GeometricParams geo = canon.toGeometric();
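// a != a holds only for NaN: skip fits whose axes came out as NaN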
if (geo.a != geo.a || geo.b != geo.b) {continue;}
ellipses.push_back(geo);
}
// return ellipses;
if (combineSimilar) {
std::vector<K::Ellipse::GeometricParams> ellipsesDistinct = filterDuplicates(ellipses);
return ellipsesDistinct;
} else {
return ellipses;
}
}
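/* Usage sketch -- the edge image is assumed to come from an upstream edge
 * detector that yields the 1-pixel-wide white edges described above:
 *
 *   K::EllipseDetection detector;
 *   detector.setCombineSimilar(true);
 *   detector.setBlurSigma(1.75f);
 *   std::vector<K::Ellipse::GeometricParams> found = detector.getFromEdgeImage(edges);
 */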
private:
void debug(const std::vector<std::vector<K::Point2i>>& segments, const K::ImageChannel& edges) {
for (const std::vector<K::Point2i>& seg : segments) {
K::ImageChannel tmp = edges * 0.5;
for (K::Point2i pt : seg) {
tmp.set(pt.x, pt.y, 1.0);
}
cv::imshow("xxx", K::CV::k_to_cv(tmp));
cv::waitKey(10);
}
}
K::ImageChannel getBlurred(const K::ImageChannel& imgEdges) {
// remove short edges [isolated pixels]
K::ImageChannel imgEdgesCleaned = K::CV::Clean::avgThreshold(imgEdges, 1, 0.33f);
K::ImageChannel imgEdgesBlur = imgEdgesCleaned;
//imgEdgesBlur = K::Dilate::apply(imgEdgesBlur, 2, K::Dilate::Shape::CIRCLE, 1.0f, 0.01f);
// slightly blur the image (spread edges)
if (blurSigma != 0) {
K::CV::Gauss gauss(blurSigma, blurSigma);
imgEdgesBlur = gauss.filter(imgEdgesBlur);
imgEdgesBlur = K::CV::Normalize::run(imgEdgesBlur);
}
// K::ImageFactory::writePNG("/tmp/bla.png", imgEdgesBlur);
return imgEdgesBlur;
}
std::vector<std::vector<K::Point2i>> getSegmentsSplitUnsplit(const std::vector<std::vector<K::Point2i>>& segments, const size_t maxSize, int maxSplits) {
std::vector<std::vector<K::Point2i>> splitSegments;
// process every input segment
for (const std::vector<K::Point2i>& seg : segments) {
// use as is
splitSegments.push_back(seg);
// large? -> also split
if (seg.size() > maxSize) {
int maxSubSegs = std::ceil((float)seg.size() / (float)maxSize);
maxSubSegs = std::min(maxSubSegs, maxSplits+1);
// 1) divide into 2 subsegments
// 2) divide into 3 subsegments
// 3) divide into 4 subsegments
// ....
for (int subSegs = 2; subSegs <= maxSubSegs; ++subSegs) {
//const int segSize = seg.size() / subSegs;
for (int subSeg = 0; subSeg < subSegs; ++subSeg) {
int start = seg.size() * subSeg / subSegs;
int end = seg.size() * (subSeg+1) / subSegs;
const std::vector<K::Point2i> sub(seg.begin()+start, seg.begin()+end);
splitSegments.push_back(sub);
}
}
// // split in two parts
// const int mid = seg.size()/2;
// const std::vector<K::Point2i> left(seg.begin(), seg.begin()+mid);
// const std::vector<K::Point2i> right(seg.begin()+mid, seg.end());
// splitSegments.push_back(left);
// splitSegments.push_back(right);
}
}
return splitSegments;
}
std::vector<std::vector<K::Point2i>> getSegmentsSplitFix(const std::vector<std::vector<K::Point2i>>& segments, const size_t maxSize) {
std::vector<std::vector<K::Point2i>> splitSegments;
// process every input segment
for (const std::vector<K::Point2i>& seg : segments) {
// keep small segments "as-is"
if (seg.size() <= maxSize) {
splitSegments.push_back(seg);
} else {
const int numSegments = (int) std::ceil((float)seg.size() / (float)maxSize);
const float sizePerSegment = (float)seg.size() / (float)numSegments; // avoid integer division before the float assignment
// 50% overlapping segments
for (float segNr = 0; segNr <= numSegments - 0.4; segNr += 0.5) {
const int start = segNr * sizePerSegment;
const int end = std::min((size_t)(start + sizePerSegment), seg.size()-1);
const std::vector<K::Point2i> pts(seg.begin()+start, seg.begin()+end);
splitSegments.push_back(pts);
}
}
}
return splitSegments;
}
/**
* split all large segments into several [overlapping] smaller-sized segments
*/
std::vector<std::vector<K::Point2i>> getSegmentsSplitVar(const std::vector<std::vector<K::Point2i>>& segments, const float minSize, const float maxSize) {
// output
std::vector<std::vector<K::Point2i>> splitSegments;
// split large segments several times
for (const std::vector<K::Point2i>& seg : segments) {
// skip this segment?
if (seg.size() < minSize) {continue;}
const float stepSize = 0.4f;
const int maxSegs = std::ceil(seg.size() / minSize); // largest number of segment divisions to check
const int minSegs = std::ceil(seg.size() / maxSize); // smallest number of segment divisions to check
for (float segs = minSegs; segs <= maxSegs; segs+=stepSize) {
const float segSize = seg.size() / segs;
// 50% overlapping segments
for (float i = 0; i < segs; i += 0.50f) {
const int start = (int)(i*segSize);
const int end = std::min((int)(start + segSize), (int)(seg.size()-1));
const int num = end-start;
if (num < 20) {continue;}
const std::vector<K::Point2i> pts(seg.begin()+start, seg.begin()+end);
splitSegments.push_back(pts);
}
}
}
// K::ImageChannel img(640, 480);
// for (const std::vector<K::Point2i>& seg : splitSegments) {
// for (const K::Point2i p : seg) {
// img.set(p.x, p.y, 1.0);
// }
// }
// K::ImageFactory::writePNG("/tmp/segments.png", img);
return splitSegments;
}
std::vector<std::vector<K::Point2i>> getSegments(const K::ImageChannel& imgEdges) {
// get all segments within the image
std::vector<K::Segment<float>> segments = K::Segmentation::getSegments(imgEdges);
std::vector<std::vector<K::Point2i>> result;
for (const K::Segment<float>& seg : segments) {
if (seg.points.size() < 16) {continue;} // ignore very small segments
if (seg.avg == 0) {continue;} // ignore black parts
result.push_back(seg.points);
}
return result;
}
public:
/** combine ellipses that are similar and return a new ellipse which is given by their average */
static inline std::vector<K::Ellipse::GeometricParams> filterDuplicates(const std::vector<K::Ellipse::GeometricParams>& src) {
std::vector<K::Ellipse::GeometricParams> filtered;
for (const K::Ellipse::GeometricParams& e1 : src) {
bool unique = true;
for (K::Ellipse::GeometricParams& e2 : filtered) {
const float dCenterDiff = e1.center.getDistance(e2.center);
//const float dSizeRatio = std::max(e1.getCircumfence(), e2.getCircumfence()) / std::min(e1.getCircumfence(), e2.getCircumfence());
const float dAxis = std::sqrt( ((e1.a - e2.a) * (e1.a - e2.a)) + ((e1.b - e2.b) * (e1.b - e2.b)) );
const bool isSimilar = (dCenterDiff < 16) && (dAxis < 6);// && (dSizeRatio < 1.12f);
// if this ellipse is similar to an existing one, join the two
if (isSimilar) { unique = false; e2.mix(e1, 0.50f); break; }
}
if (unique) { filtered.push_back(e1); }
}
return filtered;
}
};
}
#endif // ELLIPSEDETECTION_H
|
GB_binop__ge_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp64)
// A*D function (colscale): GB (_AxD__ge_fp64)
// D*A function (rowscale): GB (_DxB__ge_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp64)
// C=scalar+B GB (_bind1st__ge_fp64)
// C=scalar+B' GB (_bind1st_tran__ge_fp64)
// C=A+scalar GB (_bind2nd__ge_fp64)
// C=A'+scalar GB (_bind2nd_tran__ge_fp64)
// C type: bool
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
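// For example, GB_BINOP (Cx [p], aij, bij, i, j) expands here to
// Cx [p] = (aij >= bij) ; i.e., the GE comparison with double inputs and a
// bool result (the row/column indices i,j are unused by this operator).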
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_FP64 || GxB_NO_GE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ge_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ge_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ge_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ge_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ge_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ge_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bitmap.c | // -----------------------------------------------------------------------------
//
// "00_AccelGraph"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : bitmap.c
// Create : 2019-06-21 17:15:17
// Revise : 2019-09-28 15:36:13
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include "myMalloc.h"
#include "bitmap.h"
struct Bitmap *newBitmap( uint32_t size)
{
struct Bitmap *bitmap = (struct Bitmap *) my_malloc( sizeof(struct Bitmap));
bitmap->bitarray = (uint32_t *) my_malloc(sizeof(uint32_t) * ((size + kBitsPerWord - 1) / kBitsPerWord));
bitmap->real_size = ((size + kBitsPerWord - 1) / kBitsPerWord);
memset(bitmap->bitarray, 0, (sizeof(uint32_t) * ((size + kBitsPerWord - 1) / kBitsPerWord)));
bitmap->size = size;
bitmap->numSetBits = 0;
return bitmap;
}
struct Bitmap *newBitmapSet( uint32_t size)
{
struct Bitmap *bitmap = (struct Bitmap *) my_malloc( sizeof(struct Bitmap));
bitmap->bitarray = (uint32_t *) my_malloc(sizeof(uint32_t) * ((size + kBitsPerWord - 1) / kBitsPerWord));
bitmap->real_size = ((size + kBitsPerWord - 1) / kBitsPerWord);
memset(bitmap->bitarray, 0xFF, (sizeof(uint32_t) * ((size + kBitsPerWord - 1) / kBitsPerWord))); // 0xFF sets every bit; memset with 1 would set only one bit per byte
bitmap->size = size;
bitmap->numSetBits = size;
return bitmap;
}
void freeBitmap( struct Bitmap *bitmap)
{
if(bitmap)
{
if(bitmap->bitarray)
free(bitmap->bitarray);
free(bitmap);
}
}
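/* Usage sketch:
 *   struct Bitmap *bm = newBitmap(1024); // 1024 logical bits, all cleared
 *   setBit(bm, 10);
 *   if (getBit(bm, 10))
 *       clearBit(bm, 10);
 *   freeBitmap(bm);
 */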
void clearBitmap(struct Bitmap *bitmap)
{
memset(bitmap->bitarray, 0, (sizeof(uint32_t) * ((bitmap->size + kBitsPerWord - 1) / kBitsPerWord)));
// uint32_t *word = bitmap->bitarray;
// uint32_t i;
// #pragma omp parallel for
// for(i= 0 ; i <((bitmap->size+kBitsPerWord - 1)/kBitsPerWord); i++){
// word[i] = 0;
// }
bitmap->numSetBits = 0;
}
void setBit(struct Bitmap *bitmap, uint32_t pos)
{
bitmap->bitarray[word_offset(pos)] |= (uint32_t) (1 << bit_offset(pos));
}
void setBitXOR(struct Bitmap *bitmap, uint32_t pos)
{
bitmap->bitarray[word_offset(pos)] ^= (uint32_t) (1 << bit_offset(pos));
}
void setBitRange(struct Bitmap *bitmap, uint32_t start, uint32_t end)
{
uint32_t pos;
for (pos = start; pos < end; ++pos)
{
setBit(bitmap, pos);
}
}
void setBitAtomic(struct Bitmap *bitmap, uint32_t pos)
{
// uint32_t old_val, new_val;
// do {
// old_val = bitmap->bitarray[word_offset(pos)];
// new_val = old_val | (uint32_t) (1 << bit_offset(pos));
// } while (!__sync_bool_compare_and_swap(&bitmap->bitarray[word_offset(pos)], old_val, new_val));
__sync_fetch_and_or(bitmap->bitarray + word_offset(pos), 1ul << bit_offset(pos));
}
uint32_t getBit(struct Bitmap *bitmap, uint32_t pos)
{
return (bitmap->bitarray[word_offset(pos)] >> bit_offset(pos)) & 1l;
}
// uint32_t getBitAtomic(struct Bitmap* bitmap, uint32_t pos){
// return (bitmap->bitarray[word_offset(pos)] >> bit_offset(pos)) & 1l;;
// }
void clearBit(struct Bitmap *bitmap, uint32_t pos)
{
bitmap->bitarray[word_offset(pos)] &= ((uint32_t) (~(1l << bit_offset(pos))));
}
struct Bitmap *orBitmap(struct Bitmap *bitmap1, struct Bitmap *bitmap2)
{
uint32_t i;
uint32_t *word1 = bitmap1->bitarray;
uint32_t *word2 = bitmap2->bitarray;
bitmap1->numSetBits = 0;
for(i = 0 ; i < ((bitmap1->size + kBitsPerWord - 1) / kBitsPerWord); i++)
{
word1[i] = word1[i] | word2[i];
}
bitmap1->numSetBits = getNumOfSetBits(bitmap1);
return bitmap1;
}
struct Bitmap *andBitmap(struct Bitmap *bitmap1, struct Bitmap *bitmap2)
{
uint32_t i;
uint32_t *byte1 = bitmap1->bitarray;
uint32_t *byte2 = bitmap2->bitarray;
bitmap1->numSetBits = 0;
for(i = 0 ; i < ((bitmap1->size + kBitsPerWord - 1) / kBitsPerWord); i++)
{
byte1[i] = byte1[i] & byte2[i];
}
bitmap1->numSetBits = getNumOfSetBits(bitmap1);
return bitmap1;
}
void swapBitmaps (struct Bitmap **bitmap1, struct Bitmap **bitmap2)
{
struct Bitmap *temp_bitmap = *bitmap1;
*bitmap1 = *bitmap2;
*bitmap2 = temp_bitmap;
}
uint32_t getNumOfSetBits (struct Bitmap *bitmap)
{
uint32_t i;
uint32_t numSetBits = 0;
#pragma omp parallel for reduction(+:numSetBits) schedule(dynamic,256)
for(i = 0 ; i < (bitmap->size); i++)
{
if(getBit(bitmap, i))
numSetBits++;
}
return numSetBits;
}
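/* A word-at-a-time alternative (sketch, not used above) would replace the
 * per-bit loop with __builtin_popcount over the real_size words:
 *   for (i = 0; i < bitmap->real_size; i++)
 *       numSetBits += __builtin_popcount(bitmap->bitarray[i]);
 * The last partial word would need masking so padding bits beyond 'size'
 * are not counted.
 */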
void printSetBits (struct Bitmap *bitmap)
{
uint32_t i;
for(i = 0 ; i < (bitmap->size); i++)
{
if(getBit(bitmap, i))
{
printf("**%u \n", i);
}
}
} |
hello.c | #include<stdio.h>
#include<stdlib.h>
#include<omp.h>
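/* Build and run (typical GCC invocation):
 *   gcc -fopenmp hello.c -o hello
 *   ./hello 4    # run with 4 threads
 */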
int main(int argc, char* argv[]) {
int num_threads;
if (argc <= 1)
num_threads = 1;
else
num_threads = atoi(argv[1]);
omp_set_num_threads(num_threads);
#pragma omp parallel
{
int ID = omp_get_thread_num();
printf("Hello World from %d\n", ID);
printf("Goodbye World from %d!\n", ID);
}
return 0;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
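/*
 * For readability, the tiled loops below compute the same update as this
 * straightforward (untiled) sketch of one time step:
 *
 *   for (i = 1; i < Nz-1; i++)
 *     for (j = 1; j < Ny-1; j++)
 *       for (k = 1; k < Nx-1; k++)
 *         A[(t+1)%2][i][j][k] =
 *             coef[0][i][j][k] * A[t%2][i][j][k]
 *           + coef[1][i][j][k] * A[t%2][i-1][j][k]
 *           + coef[2][i][j][k] * A[t%2][i][j-1][k]
 *           + coef[3][i][j][k] * A[t%2][i][j][k-1]
 *           + coef[4][i][j][k] * A[t%2][i+1][j][k]
 *           + coef[5][i][j][k] * A[t%2][i][j+1][k]
 *           + coef[6][i][j][k] * A[t%2][i][j][k+1];
 */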
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
} else {
// The grid sizes are required; exit rather than read uninitialized values
fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
if (argc > 4)
Nt = atoi(argv[4]);
else
Nt = 2; // fallback so Nt is never read uninitialized (2 is the smallest value the tiled loops accept; chosen here as an assumption)
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(16*t2-Nz-4,8)),t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(8*t1+Ny+13,8)),floord(16*t2+Ny+12,8)),floord(16*t1-16*t2+Nz+Ny+11,8));t3++) {
for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2044,2048)),ceild(8*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(8*t1+Nx+13,2048)),floord(16*t2+Nx+12,2048)),floord(8*t3+Nx+4,2048)),floord(16*t1-16*t2+Nz+Nx+11,2048));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),8*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),8*t3+6),2048*t4+2046),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
magsac.h | #pragma once
#include <limits>
#include <chrono>
#include <memory>
#include "model.h"
#include "model_score.h"
#include "sampler.h"
#include "uniform_sampler.h"
#include <math.h>
#include "gamma_values.cpp"
#ifdef _WIN32
#include <ppl.h>
#endif
template <class DatumType, class ModelEstimator>
class MAGSAC
{
public:
enum Version {
// The original version of MAGSAC. It works well, however, can be quite slow in many cases.
MAGSAC_ORIGINAL,
// The recently proposed MAGSAC++ algorithm which keeps the accuracy of the original MAGSAC but is often orders of magnitude faster.
MAGSAC_PLUS_PLUS };
MAGSAC(const Version magsac_version_ = Version::MAGSAC_PLUS_PLUS) :
time_limit(std::numeric_limits<double>::max()), //
desired_fps(-1),
iteration_limit(std::numeric_limits<size_t>::max()),
maximum_threshold(10.0),
apply_post_processing(true),
mininum_iteration_number(50),
partition_number(5),
core_number(1),
number_of_irwls_iters(1),
interrupting_threshold(1.0),
last_iteration_number(0),
log_confidence(0),
point_number(0),
magsac_version(magsac_version_)
{
}
~MAGSAC() {}
// A function to run MAGSAC.
bool run(
const cv::Mat &points_, // The input data points
const double confidence_, // The required confidence in the results
ModelEstimator& estimator_, // The model estimator
gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_, // The sampler used
gcransac::Model &obtained_model_, // The estimated model parameters
int &iteration_number_, // The number of iterations done
ModelScore &model_score_); // The score of the estimated model
// A function to set the maximum inlier-outlier threshold
void setMaximumThreshold(const double maximum_threshold_)
{
maximum_threshold = maximum_threshold_;
}
// A function to set the inlier-outlier threshold used for speeding up the procedure
// and for determining the required number of iterations.
void setReferenceThreshold(const double threshold_)
{
interrupting_threshold = threshold_;
}
double getReferenceThreshold()
{
return interrupting_threshold;
}
// Setting the flag determining if post-processing is needed
void applyPostProcessing(bool value_)
{
apply_post_processing = value_;
}
// A function to set the maximum number of iterations
void setIterationLimit(size_t iteration_limit_)
{
iteration_limit = iteration_limit_;
}
// A function to set the minimum number of iterations
void setMinimumIterationNumber(size_t mininum_iteration_number_)
{
mininum_iteration_number = mininum_iteration_number_;
}
// A function to set the number of cores used in the original MAGSAC algorithm.
// In MAGSAC++, it is not used. Note that when multiple MAGSACs run in parallel,
// it is beneficial to keep the core number one for each independent MAGSAC.
// Otherwise, the threads will act weirdly.
void setCoreNumber(size_t core_number_)
{
if (magsac_version == MAGSAC_PLUS_PLUS)
fprintf(stderr, "Setting the core number for MAGSAC++ is deprecated.\n");
core_number = core_number_;
}
// Setting the number of partitions used in the original MAGSAC algorithm
// to speed up the procedure. In MAGSAC++, this parameter is not used.
void setPartitionNumber(size_t partition_number_)
{
if (magsac_version == MAGSAC_PLUS_PLUS)
fprintf(stderr, "Setting the partition number for MAGSAC++ is deprecated.\n");
partition_number = partition_number_;
}
// A function to set a desired minimum frames-per-second (FPS) value.
void setFPS(int fps_)
{
desired_fps = fps_; // The required FPS.
// The time limit which the FPS implies
time_limit = fps_ <= 0 ?
std::numeric_limits<double>::max() :
1.0 / fps_;
}
// The post-processing algorithm applying sigma-consensus to the input model once.
bool postProcessing(
const cv::Mat &points, // All data points
const gcransac::Model &so_far_the_best_model, // The input model to be improved
gcransac::Model &output_model, // The improved model parameters
ModelScore &output_score, // The score of the improved model
const ModelEstimator &estimator); // The model estimator
// The function determining the quality/score of a model using the original MAGSAC
// criterion. Note that this function is significantly slower than the quality
// function of MAGSAC++.
void getModelQuality(
const cv::Mat& points_, // All data points
const gcransac::Model& model_, // The input model
const ModelEstimator& estimator_, // The model estimator
double& marginalized_iteration_number_, // The required number of iterations marginalized over the noise scale
double& score_); // The score/quality of the model
// The function determining the quality/score of a
// model using the MAGSAC++ criterion.
void getModelQualityPlusPlus(
const cv::Mat &points_, // All data points
const gcransac::Model &model_, // The model parameter
const ModelEstimator &estimator_, // The model estimator class
double &score_, // The score to be calculated
const double &previous_best_score_); // The score of the previous so-far-the-best model
size_t number_of_irwls_iters;
protected:
Version magsac_version; // The version of MAGSAC used
size_t iteration_limit; // Maximum number of iterations allowed
size_t mininum_iteration_number; // Minimum number of iteration before terminating
double maximum_threshold; // The maximum sigma value
size_t core_number; // Number of core used in sigma-consensus
double time_limit; // A time limit after the algorithm is interrupted
int desired_fps; // The desired FPS (TODO: not tested with MAGSAC)
bool apply_post_processing; // Decides if the post-processing step should be applied
int point_number; // The current point number
int last_iteration_number; // The iteration number implied by the last run of sigma-consensus
double log_confidence; // The logarithm of the required confidence
size_t partition_number; // Number of partitions used to speed up sigma-consensus
double interrupting_threshold; // A threshold to speed up MAGSAC by interrupting the sigma-consensus procedure whenever there is no chance of being better than the previous so-far-the-best model
bool sigmaConsensus(
const cv::Mat& points_,
const gcransac::Model& model_,
gcransac::Model& refined_model_,
ModelScore& score_,
const ModelEstimator& estimator_,
const ModelScore& best_score_);
bool sigmaConsensusPlusPlus(
const cv::Mat &points_,
const gcransac::Model& model_,
gcransac::Model& refined_model_,
ModelScore &score_,
const ModelEstimator &estimator_,
const ModelScore &best_score_);
};
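// Usage sketch -- the point matrix and estimator come from the surrounding
// GC-RANSAC/MAGSAC framework; the estimator type named below is an assumption:
//
//   utils::DefaultFundamentalMatrixEstimator estimator;
//   MAGSAC<cv::Mat, utils::DefaultFundamentalMatrixEstimator> magsac;
//   magsac.setMaximumThreshold(10.0); // upper bound on the noise scale
//   gcransac::sampler::UniformSampler sampler(&points);
//   gcransac::Model model;
//   ModelScore score;
//   int iterations = 0;
//   magsac.run(points, 0.99, estimator, sampler, model, iterations, score);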
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::run(
const cv::Mat& points_,
const double confidence_,
ModelEstimator& estimator_,
gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_,
gcransac::Model& obtained_model_,
int& iteration_number_,
ModelScore &model_score_)
{
// Initialize variables
std::chrono::time_point<std::chrono::system_clock> start, end; // Variables for time measuring: start and end times
std::chrono::duration<double> elapsed_seconds; // Variables for time measuring: elapsed time
log_confidence = log(1.0 - confidence_); // The logarithm of 1 - confidence
point_number = points_.rows; // Number of points
constexpr size_t sample_size = estimator_.sampleSize(); // The sample size required for the estimation
size_t max_iteration = iteration_limit; // The maximum number of iterations initialized to the iteration limit
int iteration = 0; // Current number of iterations
gcransac::Model so_far_the_best_model; // Current best model
ModelScore so_far_the_best_score; // The score of the current best model
std::unique_ptr<size_t[]> minimal_sample(new size_t[sample_size]); // The sample used for the estimation
std::vector<size_t> pool(points_.rows);
for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
pool[point_idx] = point_idx;
if (points_.rows < sample_size)
{
fprintf(stderr, "There are not enough points for applying robust estimation. Minimum is %d; while %d are given.\n",
sample_size, points_.rows);
return false;
}
// Set the start time variable if there is some time limit set
if (desired_fps > -1)
start = std::chrono::system_clock::now();
constexpr size_t max_unsuccessful_model_generations = 50;
// Main MAGSAC iteration
while (mininum_iteration_number > iteration ||
iteration < max_iteration)
{
// Increase the current iteration number
++iteration;
// Sample a minimal subset
std::vector<gcransac::Model> models; // The set of estimated models
size_t unsuccessful_model_generations = 0; // The number of unsuccessful model generations
// Try to select a minimal sample and estimate the implied model parameters
while (++unsuccessful_model_generations < max_unsuccessful_model_generations)
{
// Get a minimal sample randomly
if (!sampler_.sample(pool, // The index pool from which the minimal sample can be selected
minimal_sample.get(), // The minimal sample
sample_size)) // The size of a minimal sample
continue;
// Check if the selected sample is valid before estimating the model
// parameters which usually takes more time.
if (!estimator_.isValidSample(points_, // All points
minimal_sample.get())) // The current sample
continue;
// Estimate the model from the minimal sample
if (estimator_.estimateModel(points_, // All data points
minimal_sample.get(), // The selected minimal sample
&models)) // The estimated models
break;
}
// Count the failed generation attempts against the iteration budget
iteration += unsuccessful_model_generations - 1;
// Select the so-far-the-best from the estimated models
for (const auto &model : models)
{
ModelScore score; // The score of the current model
gcransac::Model refined_model; // The refined model parameters
// Apply sigma-consensus to refine the model parameters by marginalizing over the noise level sigma
bool success;
if (magsac_version == Version::MAGSAC_ORIGINAL)
success = sigmaConsensus(points_,
model,
refined_model,
score,
estimator_,
so_far_the_best_score);
else
success = sigmaConsensusPlusPlus(points_,
model,
refined_model,
score,
estimator_,
so_far_the_best_score);
// Continue if the model was rejected
if (!success || score.score == -1)
continue;
// Save the iteration number when the current model is found
score.iteration = iteration;
// Update the best model parameters if needed
if (so_far_the_best_score < score)
{
so_far_the_best_model = refined_model; // Update the best model parameters
so_far_the_best_score = score; // Update the best model's score
max_iteration = MIN(max_iteration, last_iteration_number); // Update the max iteration number, but do not allow to increase
}
}
// Update the time parameters if a time limit is set
if (desired_fps > -1)
{
end = std::chrono::system_clock::now();
elapsed_seconds = end - start;
// Interrupt if the time limit is exceeded
if (elapsed_seconds.count() > time_limit)
break;
}
}
// Apply sigma-consensus as a post processing step if needed and the estimated model is valid
if (apply_post_processing)
{
// TODO
}
obtained_model_ = so_far_the_best_model;
iteration_number_ = iteration;
model_score_ = so_far_the_best_score;
return so_far_the_best_score.score > 0;
}
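/*
  A minimal usage sketch for run() (illustrative only; EstimatorType and the
  enclosing namespace of MAGSAC are placeholders, not declared in this
  section; the sampler is any gcransac::sampler::Sampler<cv::Mat, size_t>):

    MAGSAC<cv::Mat, EstimatorType> magsac;
    EstimatorType estimator;
    gcransac::Model model;
    ModelScore score;
    int iterations = 0;
    const bool success = magsac.run(points, 0.99, estimator, sampler,
        model, iterations, score);
*/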
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::postProcessing(
const cv::Mat &points_,
const gcransac::Model &model_,
gcransac::Model &refined_model_,
ModelScore &refined_score_,
const ModelEstimator &estimator_)
{
fprintf(stderr, "Sigma-consensus++ is not implemented yet as post-processing.\n");
return false;
}
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensus(
const cv::Mat &points_,
const gcransac::Model& model_,
gcransac::Model& refined_model_,
ModelScore &score_,
const ModelEstimator &estimator_,
const ModelScore &best_score_)
{
// Set up the parameters
constexpr double L = 1.05;
constexpr double k = ModelEstimator::getSigmaQuantile();
constexpr double threshold_to_sigma_multiplier = 1.0 / k;
constexpr size_t sample_size = estimator_.sampleSize();
static auto comparator = [](const std::pair<double, size_t> &left, const std::pair<double, size_t> &right) { return left.first < right.first; };
const int point_number = points_.rows;
double current_maximum_sigma = this->maximum_threshold;
// Calculating the residuals
std::vector< std::pair<double, size_t> > all_residuals;
all_residuals.reserve(point_number);
// If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better
if (best_score_.inlier_number > 0)
{
// Number of inliers which should be exceeded
int points_remaining = best_score_.inlier_number;
// Collect the points which are closer than the threshold which the maximum sigma implies
for (int point_idx = 0; point_idx < point_number; ++point_idx)
{
// Calculate the residual of the current point
const double residual = estimator_.residual(points_.row(point_idx), model_);
if (current_maximum_sigma > residual)
{
// Store the residual of the current point and its index
all_residuals.emplace_back(std::make_pair(residual, point_idx));
// Count points which are closer than a reference threshold to speed up the procedure
if (residual < interrupting_threshold)
--points_remaining;
}
// Interrupt if there is no chance of being better
// TODO: replace this part by SPRT test
if (point_number - point_idx < points_remaining)
return false;
}
// Store the number of really close inliers just to speed up the procedure
// by interrupting the next verifications.
score_.inlier_number = best_score_.inlier_number - points_remaining;
}
else
{
// The number of really close points
size_t points_close = 0;
// Collect the points which are closer than the threshold which the maximum sigma implies
for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
{
// Calculate the residual of the current point
const double residual = estimator_.residual(points_.row(point_idx), model_);
if (current_maximum_sigma > residual)
{
// Store the residual of the current point and its index
all_residuals.emplace_back(std::make_pair(residual, point_idx));
// Count points which are closer than a reference threshold to speed up the procedure
if (residual < interrupting_threshold)
++points_close;
}
}
// Store the number of really close inliers just to speed up the procedure
// by interrupting the next verifications.
score_.inlier_number = points_close;
}
std::vector<gcransac::Model> sigma_models;
std::vector<size_t> sigma_inliers;
std::vector<double> final_weights;
// The number of possible inliers
const size_t possible_inlier_number = all_residuals.size();
// Sort the residuals in ascending order
std::sort(all_residuals.begin(), all_residuals.end(), comparator);
// If no point is closer than the maximum threshold, the model has no support.
if (all_residuals.empty())
return false;
// The maximum threshold is set to be slightly bigger than the distance of the
// farthest possible inlier.
current_maximum_sigma =
all_residuals.back().first + std::numeric_limits<double>::epsilon();
const double sigma_step = current_maximum_sigma / partition_number;
last_iteration_number = 10000;
score_.score = 0;
// The weights calculated by each parallel process
std::vector<std::vector<double>> point_weights_par(partition_number, std::vector<double>(possible_inlier_number, 0));
// If OpenMP is used, calculate things in parallel
#ifdef USE_OPENMP
#pragma omp parallel for num_threads(core_number)
for (int partition_idx = 0; partition_idx < partition_number; ++partition_idx)
{
// The maximum sigma value in the current partition
const double max_sigma = (partition_idx + 1) * sigma_step;
// Find the last element which has a smaller distance than 'max_sigma'.
// Since the vector is ordered, binary search can be used to find that particular element.
const auto &last_element = std::upper_bound(all_residuals.begin(), all_residuals.end(), std::make_pair(max_sigma, 0), comparator);
const size_t sigma_inlier_number = last_element - all_residuals.begin();
// Put the indices into a vector
std::vector<size_t> sigma_inliers;
sigma_inliers.reserve(sigma_inlier_number);
// Store the points which are closer than the current sigma limit
for (size_t relative_point_idx = 0; relative_point_idx < sigma_inlier_number; ++relative_point_idx)
sigma_inliers.emplace_back(all_residuals[relative_point_idx].second);
// Check if there are enough inliers to fit a model
if (sigma_inliers.size() > sample_size)
{
// Estimating the model which the current set of inliers imply
std::vector<gcransac::Model> sigma_models;
estimator_.estimateModelNonminimal(points_,
&(sigma_inliers)[0],
sigma_inlier_number,
&sigma_models);
// If the estimation was successful calculate the implied probabilities
if (sigma_models.size() == 1)
{
const double max_sigma_squared_2 = 2 * max_sigma * max_sigma;
double residual_i_2, // The residual of the i-th point
probability_i; // The probability of the i-th point
// Iterate through all points to estimate the related probabilities
for (size_t relative_point_idx = 0; relative_point_idx < sigma_inliers.size(); ++relative_point_idx)
{
// TODO: Replace with Chi-square instead of normal distribution
const size_t &point_idx = sigma_inliers[relative_point_idx];
// Calculate the residual of the current point
residual_i_2 = estimator_.squaredResidual(points_.row(point_idx),
sigma_models[0]);
// Calculate the probability of the i-th point assuming Gaussian distribution
// TODO: replace by Chi-square distribution
probability_i = exp(-residual_i_2 / max_sigma_squared_2);
// Store the probability of the i-th point coming from the current partition
point_weights_par[partition_idx][relative_point_idx] += probability_i;
}
}
}
}
#else
fprintf(stderr, "Not implemented yet.\n");
#endif
// The weights used for the final weighted least-squares fitting
// If point normalization is applied the indexing of the weights differs.
// In that case
// final_weights[i] is the weight of inlier[i]-th point
// Otherwise,
// final_weights[i] is the weight of i-th point
if constexpr (ModelEstimator::doesNormalizationForNonMinimalFitting())
final_weights.reserve(possible_inlier_number);
else
final_weights.resize(point_number, 0);
// Collect all points which have a higher-than-zero probability of being inliers
sigma_inliers.reserve(possible_inlier_number);
for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx)
{
// Calculate the weight of the current point
double weight = 0.0;
for (size_t partition_idx = 0; partition_idx < partition_number; ++partition_idx)
weight += point_weights_par[partition_idx][point_idx];
// If the weight is approx. zero, continue.
if (weight < std::numeric_limits<double>::epsilon())
continue;
// Store the index and weight of the current point
sigma_inliers.emplace_back(all_residuals[point_idx].second);
if constexpr (ModelEstimator::doesNormalizationForNonMinimalFitting())
final_weights.emplace_back(weight);
else
final_weights[point_idx] = weight;
}
// If there are fewer inliers than the size of the minimal sample, interrupt the procedure
if (sigma_inliers.size() < sample_size)
return false;
// Estimate the model parameters using weighted least-squares fitting
if (!estimator_.estimateModelNonminimal(
points_, // All input points
&(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier
static_cast<int>(sigma_inliers.size()), // Number of possible inliers
&sigma_models, // Estimated models
&(final_weights)[0])) // Weights of points
return false;
bool is_model_updated = false;
if (sigma_models.size() == 1 && // If only a single model is estimated
estimator_.isValidModel(sigma_models.back(),
points_,
sigma_inliers,
&(sigma_inliers)[0],
interrupting_threshold,
is_model_updated)) // and it is valid
{
// Return the refined model
refined_model_ = sigma_models.back();
// Calculate the score of the model and the implied iteration number
double marginalized_iteration_number;
getModelQuality(points_, // All the input points
refined_model_, // The estimated model
estimator_, // The estimator
marginalized_iteration_number, // The marginalized iteration number
score_.score); // The marginalized score
if (marginalized_iteration_number < 0 || std::isnan(marginalized_iteration_number))
last_iteration_number = std::numeric_limits<int>::max();
else
last_iteration_number = static_cast<int>(round(marginalized_iteration_number));
return true;
}
return false;
}
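/*
  Worked example of the per-partition Gaussian weighting used above (a
  numerical sketch, not part of the API): for a partition with
  max_sigma = 2.0 and a point with residual r = 1.0,

      max_sigma_squared_2 = 2 * 2.0 * 2.0 = 8.0
      probability_i       = exp(-r^2 / max_sigma_squared_2)
                          = exp(-1.0 / 8.0) ~ 0.8825

  so a point's weight decays exponentially with its squared residual, and
  distant points contribute little to the final weighted least-squares fit.
*/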
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensusPlusPlus(
const cv::Mat &points_,
const gcransac::Model& model_,
gcransac::Model& refined_model_,
ModelScore &score_,
const ModelEstimator &estimator_,
const ModelScore &best_score_)
{
// The degrees of freedom of the data from which the model is estimated.
// E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4.
constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom();
// A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals
constexpr double k = ModelEstimator::getSigmaQuantile();
// A multiplier to convert residual values to sigmas
constexpr double threshold_to_sigma_multiplier = 1.0 / k;
// Calculating k^2 / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
constexpr double squared_k_per_2 = k * k / 2.0;
// Calculating (DoF - 1) / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0;
// TODO: check
constexpr double C = ModelEstimator::getC();
// The size of a minimal sample used for the estimation
constexpr size_t sample_size = estimator_.sampleSize();
// Calculating 2^(DoF - 1) which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
static const double two_ad_dof = std::pow(2.0, dof_minus_one_per_two);
// Calculating C * 2^(DoF - 1) which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
static const double C_times_two_ad_dof = C * two_ad_dof;
// Calculating the gamma value of (DoF - 1) / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
static const double gamma_value = tgamma(dof_minus_one_per_two);
// Calculating the upper incomplete gamma value of (DoF - 1) / 2 with k^2 / 2.
constexpr double gamma_k = ModelEstimator::getUpperIncompleteGammaOfK();
// Calculating the lower incomplete gamma value of (DoF - 1) / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
static const double gamma_difference = gamma_value - gamma_k;
// The number of points provided
const int point_number = points_.rows;
// The manually set maximum inlier-outlier threshold
double current_maximum_sigma = this->maximum_threshold;
// Calculating the pairs of (residual, point index).
std::vector< std::pair<double, size_t> > residuals;
// Occupy the maximum required memory to avoid doing it later.
residuals.reserve(point_number);
// If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better
if (best_score_.inlier_number > 0)
{
// Number of points close to the previous so-far-the-best model.
// This model should have more inliers.
int points_remaining = best_score_.inlier_number;
// Collect the points which are closer than the threshold which the maximum sigma implies
for (int point_idx = 0; point_idx < point_number; ++point_idx)
{
// Calculate the residual of the current point
const double residual = estimator_.residual(points_.row(point_idx), model_);
if (current_maximum_sigma > residual)
{
// Store the residual of the current point and its index
residuals.emplace_back(std::make_pair(residual, point_idx));
// Count points which are closer than a reference threshold to speed up the procedure
if (residual < interrupting_threshold)
--points_remaining;
}
// Interrupt if there is no chance of being better
// TODO: replace this part by SPRT test
if (point_number - point_idx < points_remaining)
return false;
}
// Store the number of really close inliers just to speed up the procedure
// by interrupting the next verifications.
score_.inlier_number = best_score_.inlier_number - points_remaining;
}
else
{
// The number of really close points
size_t points_close = 0;
// Collect the points which are closer than the threshold which the maximum sigma implies
for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
{
// Calculate the residual of the current point
const double residual = estimator_.residual(points_.row(point_idx), model_);
if (current_maximum_sigma > residual)
{
// Store the residual of the current point and its index
residuals.emplace_back(std::make_pair(residual, point_idx));
// Count points which are closer than a reference threshold to speed up the procedure
if (residual < interrupting_threshold)
++points_close;
}
}
// Store the number of really close inliers just to speed up the procedure
// by interrupting the next verifications.
score_.inlier_number = points_close;
}
// Models fit by weighted least-squares fitting
std::vector<gcransac::Model> sigma_models;
// Points used in the weighted least-squares fitting
std::vector<size_t> sigma_inliers;
// Weights used in the weighted least-squares fitting
std::vector<double> sigma_weights;
// Number of points considered in the fitting
const size_t possible_inlier_number = residuals.size();
// Occupy the memory to avoid doing it inside the calculation possibly multiple times
sigma_inliers.reserve(possible_inlier_number);
// Occupy the memory to avoid doing it inside the calculation possibly multiple times
sigma_weights.reserve(possible_inlier_number);
// Calculate 2 * \sigma_{max}^2 a priori
const double squared_sigma_max_2 = current_maximum_sigma * current_maximum_sigma * 2.0;
// Divide C * 2^(DoF - 1) by \sigma_{max} a priori
const double one_over_sigma = C_times_two_ad_dof / current_maximum_sigma;
// Calculate the weight of a point with 0 residual (i.e., fitting perfectly) a priori
const double weight_zero = one_over_sigma * gamma_difference;
// Initialize the polished model with the initial one
gcransac::Model polished_model = model_;
// A flag to determine if the initial model has been updated
bool updated = false;
// Do the iteratively re-weighted least squares fitting
for (size_t iterations = 0; iterations < number_of_irwls_iters; ++iterations)
{
// If the current iteration is not the first, the set of possible inliers
// (i.e., points closer than the maximum threshold) has to be recalculated.
if (iterations > 0)
{
// The number of points close to the model
size_t points_close = 0;
// Remove everything from the residual vector
residuals.clear();
// Collect the points which are closer than the maximum threshold
for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
{
// Calculate the residual of the current point
const double residual = estimator_.residual(points_.row(point_idx), polished_model);
if (current_maximum_sigma > residual)
{
// Store the residual of the current point and its index
residuals.emplace_back(std::make_pair(residual, point_idx));
// Count points which are closer than a reference threshold to speed up the procedure
if (residual < interrupting_threshold)
++points_close;
}
}
// Store the number of really close inliers just to speed up the procedure
// by interrupting the next verifications.
score_.inlier_number = points_close;
// Number of points closer than the threshold
const size_t possible_inlier_number = residuals.size();
// Clear the inliers and weights
sigma_inliers.clear();
sigma_weights.clear();
// Occupy the memory for the inliers and weights
sigma_inliers.reserve(possible_inlier_number);
sigma_weights.reserve(possible_inlier_number);
}
if constexpr (!ModelEstimator::doesNormalizationForNonMinimalFitting())
sigma_weights.resize(point_number, 0);
// Calculate the weight of each point
for (const auto &[residual, idx] : residuals)
{
// The weight
double weight = 0.0;
// If the residual is ~0, the point fits perfectly and it is handled differently
if (residual < std::numeric_limits<double>::epsilon())
weight = weight_zero;
else
{
// Calculate the squared residual
const double squared_residual = residual * residual;
// Get the position of the gamma value in the lookup table
size_t x = round(precision_of_stored_gammas * squared_residual / squared_sigma_max_2);
// Put the index of the point into the vector of points used for the least squares fitting
sigma_inliers.emplace_back(idx);
// If the sought gamma value is not stored in the lookup, return the closest element
if (stored_gamma_number < x)
x = stored_gamma_number;
// Calculate the weight of the point
weight = one_over_sigma * (stored_gamma_values[x] - gamma_k);
}
// Store the weight of the point
if constexpr (ModelEstimator::doesNormalizationForNonMinimalFitting())
sigma_weights.emplace_back(weight);
else
sigma_weights[idx] = weight;
}
// If there are fewer points close to the model than the minimal sample size,
// terminate.
if (sigma_inliers.size() < sample_size)
return false;
// Estimate the model parameters using weighted least-squares fitting
if (!estimator_.estimateModelNonminimal(
points_, // All input points
&(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier
static_cast<int>(sigma_inliers.size()), // Number of possible inliers
&sigma_models, // Estimated models
&(sigma_weights)[0])) // Weights of points
{
// If the estimation failed and no iteration has succeeded so far,
// terminate with failure.
if (iterations == 0)
return false;
// Otherwise, if at least one iteration has succeeded,
// simply stop the refinement loop.
break;
}
// Update the model parameters
polished_model = sigma_models[0];
// Clear the vector of models and keep only the best
sigma_models.clear();
// The model has been updated
updated = true;
}
bool is_model_updated = false;
if (updated && // If the model has been updated
estimator_.isValidModel(polished_model,
points_,
sigma_inliers,
&(sigma_inliers[0]),
interrupting_threshold,
is_model_updated)) // and it is valid
{
// Return the refined model
refined_model_ = polished_model;
// Calculate the score of the model and the implied iteration number
getModelQualityPlusPlus(points_, // All the input points
refined_model_, // The estimated model
estimator_, // The estimator
score_.score, // The marginalized score
best_score_.score); // The score of the previous so-far-the-best model
// Update the iteration number
last_iteration_number =
log_confidence / log(1.0 - std::pow(static_cast<double>(score_.inlier_number) / point_number, sample_size));
return true;
}
return false;
}
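/*
  The last_iteration_number update above is the standard RANSAC termination
  criterion: with confidence eta, inlier ratio w = inlier_number /
  point_number, and minimal sample size m,

      N = log(1 - eta) / log(1 - w^m).

  For example, with eta = 0.99, w = 0.5 and m = 7 (a seven-point fundamental
  matrix solver), N = log(0.01) / log(1 - 0.5^7) ~ 587 iterations.
*/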
template <class DatumType, class ModelEstimator>
void MAGSAC<DatumType, ModelEstimator>::getModelQualityPlusPlus(
const cv::Mat &points_, // All data points
const gcransac::Model &model_, // The model parameter
const ModelEstimator &estimator_, // The model estimator class
double &score_, // The score to be calculated
const double &previous_best_score_) // The score of the previous so-far-the-best model
{
// The degrees of freedom of the data from which the model is estimated.
// E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4.
constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom();
// A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals
constexpr double k = ModelEstimator::getSigmaQuantile();
// A multiplier to convert residual values to sigmas
constexpr double threshold_to_sigma_multiplier = 1.0 / k;
// Calculating k^2 / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
constexpr double squared_k_per_2 = k * k / 2.0;
// Calculating (DoF - 1) / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0;
// Calculating (DoF + 1) / 2 which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
constexpr double dof_plus_one_per_two = (degrees_of_freedom + 1.0) / 2.0;
// TODO: check
constexpr double C = 0.25;
// Calculating 2^(DoF - 1) which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
static const double two_ad_dof_minus_one = std::pow(2.0, dof_minus_one_per_two);
// Calculating 2^(DoF + 1) which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
static const double two_ad_dof_plus_one = std::pow(2.0, dof_plus_one_per_two);
// Calculate the gamma value of k
constexpr double gamma_value_of_k = ModelEstimator::getUpperIncompleteGammaOfK();
// Calculate the lower incomplete gamma value of k
constexpr double lower_gamma_value_of_k = ModelEstimator::getLowerIncompleteGammaOfK();
// The number of points provided
const int point_number = points_.rows;
// The previous best loss
const double previous_best_loss = 1.0 / previous_best_score_;
// Convert the maximum threshold to a sigma value
const double maximum_sigma = threshold_to_sigma_multiplier * maximum_threshold;
// Calculate the squared maximum sigma
const double maximum_sigma_2 = maximum_sigma * maximum_sigma;
// Calculate \sigma_{max}^2 / 2
const double maximum_sigma_2_per_2 = maximum_sigma_2 / 2.0;
// Calculate 2 * \sigma_{max}^2
const double maximum_sigma_2_times_2 = maximum_sigma_2 * 2.0;
// Calculate the loss implied by an outlier
const double outlier_loss = maximum_sigma * two_ad_dof_minus_one * lower_gamma_value_of_k;
// Calculating 2^(DoF + 1) / \sigma_{max} which will be used for the estimation and,
// due to being constant, it is better to calculate it a priori.
const double two_ad_dof_plus_one_per_maximum_sigma = two_ad_dof_plus_one / maximum_sigma;
// The loss which a point implies
double loss = 0.0,
// The total loss regarding the current model
total_loss = 0.0;
// Iterate through all points to calculate the implied loss
for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
{
// Calculate the residual of the current point
const double residual =
estimator_.residualForScoring(points_.row(point_idx), model_.descriptor);
// If the residual is greater than the maximum threshold, consider the point an outlier
// and add the implied loss to the total loss.
if (maximum_threshold < residual)
loss = outlier_loss;
else // Otherwise, consider the point inlier, and calculate the implied loss
{
// Calculate the squared residual
const double squared_residual = residual * residual;
// Divide the residual by the 2 * \sigma^2
const double squared_residual_per_sigma = squared_residual / maximum_sigma_2_times_2;
// Get the position of the gamma value in the lookup table
size_t x = round(precision_of_stored_incomplete_gammas * squared_residual_per_sigma);
// If the sought gamma value is not stored in the lookup, return the closest element
if (stored_incomplete_gamma_number < x)
x = stored_incomplete_gamma_number;
// Calculate the loss implied by the current point
loss = maximum_sigma_2_per_2 * stored_lower_incomplete_gamma_values[x] +
squared_residual / 4.0 * (stored_complete_gamma_values[x] -
gamma_value_of_k);
loss = loss * two_ad_dof_plus_one_per_maximum_sigma;
}
// Update the total loss
total_loss += loss;
// Break the validation if there is no chance of being better than the previous
// so-far-the-best model.
if (previous_best_loss < total_loss)
break;
}
// Calculate the score of the model from the total loss
score_ = 1.0 / total_loss;
}
template <class DatumType, class ModelEstimator>
void MAGSAC<DatumType, ModelEstimator>::getModelQuality(
const cv::Mat &points_, // All data points
const gcransac::Model &model_, // The model parameter
const ModelEstimator &estimator_, // The model estimator class
double &marginalized_iteration_number_, // The marginalized iteration number to be calculated
double &score_) // The score to be calculated
{
// Set up the parameters
constexpr size_t sample_size = estimator_.sampleSize();
const size_t point_number = points_.rows;
// Getting the inliers
std::vector<std::pair<double, size_t>> all_residuals;
all_residuals.reserve(point_number);
double max_distance = 0;
for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
{
// Calculate the residual of the current point
const double residual =
estimator_.residualForScoring(points_.row(point_idx), model_.descriptor);
// If the residual is smaller than the maximum threshold, add it to the set of possible inliers
if (maximum_threshold > residual)
{
max_distance = MAX(max_distance, residual);
all_residuals.emplace_back(std::make_pair(residual, point_idx));
}
}
// Set the maximum distance to be slightly bigger than that of the farthest possible inlier
max_distance = max_distance +
std::numeric_limits<double>::epsilon();
// Number of possible inliers
const size_t possible_inlier_number = all_residuals.size();
// The extent of a partition
const double threshold_step = max_distance / partition_number;
// The maximum threshold considered in each partition
std::vector<double> thresholds(partition_number);
std::vector<double> thresholds_squared(partition_number);
std::vector<double> thresholds_2_squared(partition_number);
// Calculating the thresholds for each partition
for (size_t i = 0; i < partition_number; ++i)
{
thresholds[i] = (i + 1) * threshold_step;
thresholds_squared[i] = thresholds[i] * thresholds[i];
thresholds_2_squared[i] = 2 * thresholds_squared[i];
}
double residual_i, // Residual of the i-th point
residual_i_squared, // Squared residual of the i-th point
probability_i; // Probability of the i-th point given the model
std::vector<double> inliers(partition_number, 0), // RANSAC score for each partition
probabilities(partition_number, 1); // Probabilities for each partition
for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx)
{
residual_i = all_residuals[point_idx].first;
residual_i_squared = residual_i * residual_i;
for (size_t i = 0; i < partition_number; ++i)
{
if (residual_i < thresholds[i])
{
probability_i = 1.0 - residual_i_squared / thresholds_squared[i];
++inliers[i];
probabilities[i] += probability_i;
}
}
}
score_ = 0;
marginalized_iteration_number_ = 0.0;
for (size_t i = 0; i < partition_number; ++i)
{
score_ += probabilities[i];
marginalized_iteration_number_ += log_confidence / log(1.0 - std::pow(inliers[i] / point_number, sample_size));
}
marginalized_iteration_number_ = marginalized_iteration_number_ / partition_number;
}
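/*
  Note on the marginalization above: each partition i corresponds to one
  candidate noise scale with threshold t_i, scoring every candidate inlier
  with the truncated quadratic kernel 1 - r^2 / t_i^2. Summing the partition
  scores and averaging the implied RANSAC iteration numbers approximates
  marginalizing the model quality over the unknown noise level in
  (0, max_distance].
*/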
|
GB_binop__bget_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__bget_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__bget_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_uint8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_uint8)
// C=scalar+B GB (_bind1st__bget_uint8)
// C=scalar+B' GB (_bind1st_tran__bget_uint8)
// C=A+scalar GB (_bind2nd__bget_uint8)
// C=A'+scalar GB (_bind2nd_tran__bget_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = GB_BITGET (aij, bij, uint8_t, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, uint8_t, 8) ;
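// Illustration of the operator above (a reading of the GB_BITGET macro,
// which indexes bits 1-based as in MATLAB's bitget): for aij = 6 (binary
// 00000110) and bij = 2, cij = GB_BITGET (6, 2, uint8_t, 8) is 1 because
// bit 2 of 6 is set, while bij = 4 gives 0.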
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_UINT8 || GxB_NO_BGET_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bget_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bget_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bget_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bget_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bget_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bget_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bget_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bget_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bget_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITGET (x, bij, uint8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bget_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITGET (aij, y, uint8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, uint8_t, 8) ; \
}
GrB_Info GB (_bind1st_tran__bget_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, uint8_t, 8) ; \
}
GrB_Info GB (_bind2nd_tran__bget_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__isnan_bool_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isnan_bool_fc64)
// op(A') function: GB (_unop_tran__isnan_bool_fc64)
// C type: bool
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = GB_cisnan (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cisnan (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = GB_cisnan (z) ; \
}
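// Note: for double complex inputs, GB_cisnan reports true when either the
// real or the imaginary part is NaN (a reading of its definition in GB.h);
// e.g., isnan(3 + NaN*i) is true while isnan(3 + 4*i) is false.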
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__isnan_bool_fc64)
(
bool *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = GB_cisnan (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = GB_cisnan (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__isnan_bool_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
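// Design note: the single-element cache above exploits the locality of
// nullability queries: consecutive lookups almost always target the same
// FileID, so most calls avoid the DenseMap probe entirely. A sketch of the
// access pattern (illustrative, not existing Clang code):
//
//   FileNullabilityMap NullabilityMap;
//   FileNullability &FN1 = NullabilityMap[FID]; // first access: map lookup
//   FileNullability &FN2 = NullabilityMap[FID]; // cache hit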
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref; clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
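// A usage sketch for PreferredTypeBuilder (illustrative; the surrounding
// parser state is assumed, not shown in this header): the parser records the
// expected type when it enters a context, and code completion later queries
// it at the same token location:
//
//   PreferredTypeBuilder PreferredType;
//   PreferredType.enterReturn(Actions, Tok.getLocation());
//   ...
//   QualType Expected = PreferredType.get(Tok.getLocation());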
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method() {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
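// Illustrative sketch, not part of the original header: the intended mapping
// from #pragma pack directives to PragmaStack actions, using the
// PragmaStack<unsigned> PackStack declared below. Locations are elided, and
// in this sketch the value argument is ignored for plain pop/reset:
//
//   #pragma pack(push, 8)  ->  PackStack.Act(Loc, PSK_Push_Set, "", 8);
//   #pragma pack(pop)      ->  PackStack.Act(Loc, PSK_Pop, "", 0);
//   #pragma pack()         ->  PackStack.Act(Loc, PSK_Reset, "", 0);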
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
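// Illustrative sketch, not part of the original header: how the sentinel
// RAII above would bracket a C++ method body so that all MS #pragma stacks
// are restored on exit. The slot label and the parse call are hypothetical.
//
//   {
//     PragmaStackSentinelRAII Sentinel(S, "InternalPragmaState",
//                                      /*ShouldAct=*/IsCXXMethodBody);
//     parseFunctionBody(); // hypothetical parser callback
//   } // sentinel slots popped here, restoring the stacks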
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This represents an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A pushed group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// True if the current expression is a member bounds expression
/// for a structure. Member bounds expressions can only reference
/// members and cannot reference variables.
bool IsMemberBoundsExpr;
std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This
/// allows us to associate a raw vector type with one of the ext_vector type
/// names. This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of the translation unit.
///
/// This list contains class members and the locations of delete-expressions
/// for which we could not prove whether they mismatch the new-expression
/// used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
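// Illustrative sketch, not part of the original header: bracketing a
// declaration parse with a delayed-diagnostic pool, assuming
// sema::DelayedDiagnosticPool can be chained to the current pool.
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
//   // ... parse; access/deprecation diagnostics accumulate in Pool ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);
//   // The caller then decides whether to emit or drop Pool's contents.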
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
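// Illustrative sketch, not part of the original header: temporarily entering
// a declaration context (RD is a hypothetical record being processed).
//
//   {
//     Sema::ContextRAII SavedContext(S, RD); // S.CurContext becomes RD
//     // ... build declarations inside RD ...
//   } // CurContext, the undelayed-diagnostics state, and the CXXThis
//     // override are restored here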
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
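// Illustrative sketch, not part of the original header: synthesizing the
// body of an implicit special member. CopyAssign and UseLoc are
// hypothetical.
//
//   SynthesizedFunctionScope Scope(S, CopyAssign);
//   Scope.addContextNote(UseLoc); // attach a "while defining..." context
//   // ... build and attach the body ...
//   // The destructor pops the code-synthesis, expression-evaluation, and
//   // function scopes in the right order.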
/// WeakUndeclaredIdentifiers - Identifiers referenced by \#pragma weak
/// before being declared. Rare. Such an identifier may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle them separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold 'respondsToSelector:'.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, such as the operand of a SIZE operator in MS-style
/// inline assembly (see UnevaluatedAbstract below).
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
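// Illustrative examples, not part of the original header, of where each
// context typically arises (simplified):
//
//   sizeof(e), decltype(e)                  -> Unevaluated
//   the {1, 2} in sizeof(T{1, 2})           -> UnevaluatedList
//   the not-taken branch of `if constexpr`  -> DiscardedStatement
//   SIZE s.field in MS inline assembly      -> UnevaluatedAbstract
//   case labels, array bounds               -> ConstantEvaluated
//   ordinary expression statements          -> PotentiallyEvaluated
//   a default function argument             -> PotentiallyEvaluatedIfUsed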
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
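// Illustrative sketch, not part of the original header: the PointerIntPair
// packs the resolved method and the 2-bit Kind into one pointer-sized value,
// keeping the cache entries below small. MD is a hypothetical candidate.
//
//   SpecialMemberOverloadResult R(MD); // Success, or NoMemberOrDeleted if
//                                      // MD is deleted
//   if (R.getKind() == SpecialMemberOverloadResult::Success) {
//     CXXMethodDecl *Member = R.getMethod();
//     // ... use Member ...
//   }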
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
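// Illustrative usage, not part of the original header; the diagnostic ID is
// hypothetical. operator<< keeps returning the SemaDiagnosticBuilder, and
// the diagnostic (plus any instantiation stack) is emitted in its
// destructor:
//
//   S.Diag(D->getLocation(), diag::err_example_diag) // hypothetical ID
//       << D->getDeclName() << T;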
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T, CheckedPointerKind kind,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
CheckedArrayKind Kind, SourceRange Brackets,
DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed; since a noderef pointer may not be accessed, we must
/// defer the check until we know whether a dereference is really an access.
/// For example, in `&*p` where `p` is a noderef pointer, we will first parse
/// the `*p`, but need to check whether `address of` is applied to it. This
/// requires keeping a container of all pending expressions and checking
/// whether their address is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
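// Illustrative usage, not part of the original header, of the variadic
// Require* overloads above; the diagnostic ID is hypothetical. The extra
// arguments are streamed into the diagnostic before the type itself:
//
//   if (S.RequireCompleteType(Loc, T,
//                             diag::err_example_incomplete, // hypothetical
//                             SomeRange))
//     return true; // a diagnostic was emitted; T is incomplete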
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate,
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
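// Illustrative sketch, not part of the original header: how a parser might
// consume the classification. The surrounding logic is hypothetical; the
// accessors come from NameClassification above.
//
//   Sema::NameClassification NC = S.ClassifyName(
//       CurScope, SS, Name, NameLoc, NextToken, IsAddressOfOperand);
//   switch (NC.getKind()) {
//   case Sema::NC_Type:       return handleType(NC.getType());
//   case Sema::NC_Expression: return NC.getExpression();
//   case Sema::NC_TypeTemplate:
//     return handleTemplate(NC.getTemplateName(), NC.getTemplateNameKind());
//   default:                  break;
//   }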
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
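// Illustrative example, not part of the original header: for `f < a > b;`
// where lookup finds a non-template function `f`, the resulting DeclRefExpr
// has no explicit template arguments, so the predicate above returns true
// and diagnoseExprIntendedAsTemplateName can suggest that `f` was intended
// to name a template.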
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
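// Illustrative sketch mirroring the enums above (assumed call site): the
// NTCUK_* kind flags are combined bitwise, e.g. a pass-by-value parameter
// must be both copyable and destructible:
//
//   S.checkNonTrivialCUnion(ParamTy, ParamLoc, Sema::NTCUC_FunctionParam,
//                           Sema::NTCUK_Copy | Sema::NTCUK_Destruct);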
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
SourceLocation EqualLoc = SourceLocation());
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
bool ValidateNTCheckedType(ASTContext &C, QualType VDeclType, Expr *Init);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose when a pass-by-value parameter or the return value of a
/// function or Objective-C method definition is larger than a specified
/// threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
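// For reference, a sketch of the source forms these handle (C++20 modules
// plus the Objective-C '@import' spelling); the location mapping below is
// an illustration, not original documentation:
//
//   import M.Part;        // StartLoc == ImportLoc, Path = {M, Part}
//   export import M;      // StartLoc == ExportLoc
//   @import M;            // StartLoc is the location of the '@'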
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note: the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
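// Illustrative (not verbatim) TableGen fragment showing how the enumerator
// order above lines up with a %select in a diagnostic:
//
//   "%select{declaration|definition|default argument|explicit specialization"
//   "|partial specialization}0 of %1 must be imported from module '%2'"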
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr,
RecordDecl::Genericity GenericKind = RecordDecl::NonGeneric,
ArrayRef<TypedefDecl *> TypeParams = None);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
FieldDecl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and reject it / error out on a
/// structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
/// Push the parameters listed in Params into scope.
void ActOnSetupParametersAgain(Scope* S, ArrayRef<ParmVarDecl *> Params);
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
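// Worked example (a sketch of the rule above): if a declaration already has
// an availability attribute for platform P applied via '#pragma clang
// attribute' (priority 1), a later attribute inferred from another platform
// (priority 2) is not applied, while an explicit attribute written on the
// declaration itself (priority 0) removes the pragma-applied one.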
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(
NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
bool IsUnavailable, StringRef Message, bool IsStrict,
StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// Checked C specific methods for merging function declarations.
bool CheckedCFunctionDeclCompatibility(FunctionDecl *New, FunctionDecl *Old);
bool CheckedCMergeFunctionDecls(FunctionDecl *New, FunctionDecl *Old);
bool DiagnoseCheckedCFunctionCompatibility(FunctionDecl *New,
FunctionDecl *Old);
// Used for %select in diagnostics for errors involving checked types.
enum class CheckedTypeClassification {
CCT_Any,
CCT_Struct,
CCT_Union
};
// Used for %select in diagnostics for errors involving redeclarations
// with bounds.
enum class CheckedCBoundsError {
CCBE_Parameter,
CCBE_Return,
CCBE_Variable
};
// Used for %select in diagnostics for errors involving redeclarations
// with bounds annotations.
enum class BoundsAnnotationKind {
Bounds,
IType
};
CheckedTypeClassification classifyForCheckedTypeDiagnostic(QualType qt);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
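// Minimal sketch, not original code: a concrete diagnoser only has to
// supply the diagnostics. 'diag::err_placeholder' is a stand-in for a real
// diagnostic ID, and 'E'/'Loc' are assumed to be in scope:
//
//   struct Diagnoser : Sema::ICEConvertDiagnoser {
//     Diagnoser() : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                                       /*Suppress=*/false,
//                                       /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_placeholder) << T;
//     }
//     // ...remaining diagnose*/note* overrides elided...
//   } D;
//   ExprResult R = PerformContextualImplicitConversion(Loc, E, D);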
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType]  -->  [Return]
// NonFunctionType          -->  NonFunctionType
// R (A)                    -->  R (A)
// R (*)(A)                 -->  R (A)
// R (&)(A)                 -->  R (A)
// R (S::*)(A)              -->  R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
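// For reference, this resolves the begin/end calls of the standard
// range-based for desugaring, which is roughly (per C++11 [stmt.ranged]):
//
//   auto &&__range = range-init;
//   auto __begin = begin-expr, __end = end-expr;
//   for (; __begin != __end; ++__begin) {
//     for-range-declaration = *__begin;
//     statement
//   }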
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupNameKind enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
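// Illustrative sketch (assumed caller) of the unqualified entry point
// described in the name-lookup overview above:
//
//   LookupResult R(*this, Name, NameLoc, LookupOrdinaryName);
//   if (LookupName(R, S) && R.isSingleResult()) {
//     NamedDecl *D = R.getFoundDecl();
//     // ...
//   }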
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate. Returns a new Expr if all typos
/// were successfully corrected, or ExprError if one or more typos could
/// not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
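// Illustrative sketch (assumed usage, not from the original header): a
// Filter that accepts only scalar-typed rebuilds; returning ExprError()
// makes the next combination of corrections be tried.
//
//   ExprResult Res = SemaRef.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Rebuilt) -> ExprResult {
//         if (Rebuilt->getType()->isScalarType())
//           return Rebuilt;
//         return ExprError(); // reject; try other corrections
//       });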
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for the nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method's
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// null. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via a
/// warning) that an atomic property must not have one of a user-declared
/// setter or getter without the other.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks methods implemented in a
/// category against those implemented in its primary class, warning each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// the CheckTheOther parameter is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
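// Illustrative sketch (assumed usage): resolving a message sent to "id",
// where the receiver's class is unknown, via the global method pool.
//
//   ObjCMethodDecl *M =
//       SemaRef.LookupInstanceMethodInGlobalPool(Sel, SourceRange(Loc));
//   // M has selector Sel; a warning is emitted if the pool holds
//   // multiple conflicting signatures for Sel.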
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
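// Illustrative sketch (assumed usage): wrapping a parsed condition in a
// full-expression before handing it to a statement callback.
//
//   FullExprArg Cond = SemaRef.MakeFullExpr(CondExpr);
//   // Cond.get() is the expression after ActOnFinishFullExpr has run
//   // cleanup/temporary handling; null if that failed.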
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr,
CheckedScopeSpecifier WrittenCSS = CSS_None,
SourceLocation CSSLoc = SourceLocation(),
SourceLocation CSMLoc = SourceLocation());
private:
CheckedScopeSpecifier CheckingKind;
// Keep a stack of saved checked scope information.
class SavedCheckedScope {
public:
SavedCheckedScope(CheckedScopeSpecifier S, SourceLocation L) :
Loc(L), Saved(S) {}
SourceLocation Loc;
CheckedScopeSpecifier Saved;
};
SmallVector<SavedCheckedScope, 8> CheckingKindStack; // can be empty
public:
CheckedScopeSpecifier GetCheckedScopeInfo() {
return CheckingKind;
}
void SetCheckedScopeInfo(CheckedScopeSpecifier CSS) {
CheckingKind = CSS;
}
void PushCheckedScopeInfo(SourceLocation Loc) {
CheckingKindStack.push_back(SavedCheckedScope(CheckingKind, Loc));
}
// Restore the previously saved checked scope state. Returns true if the
// stack was empty (an unmatched pop), false on success.
bool PopCheckedScopeInfo() {
if (CheckingKindStack.size() > 0) {
CheckingKind = CheckingKindStack.back().Saved;
CheckingKindStack.pop_back();
return false;
}
else
return true;
}
void DiagnoseUnterminatedCheckedScope();
bool IsCheckedScope() {
return CheckingKind != CSS_Unchecked;
}
class CheckedScopeRAII {
Sema &SemaRef;
CheckedScopeSpecifier PrevCheckingKind;
public:
CheckedScopeRAII(Sema &SemaRef, CheckedScopeSpecifier CSS)
: SemaRef(SemaRef),
PrevCheckingKind(SemaRef.CheckingKind) {
if (CSS != CSS_None)
SemaRef.CheckingKind = CSS;
}
CheckedScopeRAII(Sema &S, DeclSpec &DS) :
CheckedScopeRAII(S, DS.getCheckedScopeSpecifier()) {
}
~CheckedScopeRAII() {
SemaRef.CheckingKind = PrevCheckingKind;
}
};
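// Illustrative sketch (assumed usage): entering the checked scope implied
// by a declaration's DeclSpec for the duration of one block.
//
//   {
//     Sema::CheckedScopeRAII Checked(SemaRef, DS); // DS is a DeclSpec
//     // ... analyze the declaration under the scope's checked kind ...
//   } // CheckingKind reverts to its previous value here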
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false,
CheckedScopeSpecifier CSS = CSS_None):
S(S), CheckedProperties(S, CSS) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
CheckedScopeRAII CheckedProperties;
};
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
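// Illustrative sketch (assumed usage): pop the function scope on every
// early-exit path, and disable the helper once ownership is handed off.
//
//   FunctionScopeRAII PopScope(SemaRef);
//   if (Invalid)
//     return StmtError();   // PopScope pops the scope here
//   PopScope.disable();     // success path pops it elsewhere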
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
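// Illustrative note (inferred from the values above): the enumerators are
// bit flags, so semantics are combined with bitwise-or, e.g.
//
//   auto CESK = CopyElisionSemanticsKind(CES_AllowParameters |
//                                        CES_AllowDifferentTypes);
//   // equivalent to CES_Default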
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
enum CheckedScopeTypeLocation {
CSTL_TopLevel,
CSTL_Nested,
CSTL_BoundsSafeInterface
};
/// Returns true if Ty is allowed in a checked scope:
/// - If Ty is a pointer or array type, it must be a checked pointer or
/// array type or an unchecked pointer or array type with a bounds-safe
/// interface.
/// - This rule applies recursively to any types nested within Ty.
/// - All other types are allowed in checked scopes.
/// Return false if Ty is not allowed.
bool AllowedInCheckedScope(QualType Ty,
const InteropTypeExpr *InteropType,
bool IsParam, CheckedScopeTypeLocation Loc,
CheckedScopeTypeLocation &ProblemLoc,
QualType &ProblemTy);
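// Illustrative examples of the rules above (Checked C syntax; not part of
// the original header):
//
//   _Ptr<int> p;    // allowed: checked pointer type
//   int *q;         // not allowed: unchecked pointer without a
//                   // bounds-safe interface
//   _Ptr<int *> r;  // not allowed: unchecked pointer type nested in Ty
//   double d;       // allowed: not a pointer or array type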
// Enum for diagnostic message that describes the type of declaration
// being checked.
enum CheckedDeclKind {
CDK_Parameter,
CDK_FunctionReturn,
CDK_LocalVariable,
CDK_GlobalVariable,
CDK_Member
};
/// \param D - the target declaration
/// \param UseLoc - invalid (the default) when the declaration itself is
/// being checked; valid only when the check is for a use of the variable
/// \returns true if the target declaration is a valid checked declaration
bool DiagnoseCheckedDecl(const ValueDecl *D,
SourceLocation UseLoc = SourceLocation());
bool DiagnoseTypeInCheckedScope(QualType Ty, SourceLocation Start, SourceLocation End);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
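// Illustrative sketch (assumed usage): asking whether a capture would
// succeed without performing it or emitting diagnostics.
//
//   QualType CaptureType, DeclRefType;
//   bool Failed = SemaRef.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);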
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op,
bool isCheckedScope = false);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
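// Illustrative decomposition (inferred from OffsetOfComponent above): for
// __builtin_offsetof(struct S, a.b[2]) the Components array holds three
// entries: "a" and "b" with isBrackets false (U.IdentInfo), then the
// index expression 2 with isBrackets true (U.E).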
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Checked C Extension ----------------------===//
private:
QualType ValidateBoundsExprArgument(Expr *Arg);
public:
ExprResult ActOnNullaryBoundsExpr(SourceLocation BoundKWLoc,
BoundsExpr::Kind Kind,
SourceLocation RParenLoc);
ExprResult ActOnCountBoundsExpr(SourceLocation BoundsKWLoc,
BoundsExpr::Kind Kind, Expr *CountExpr,
SourceLocation RParenLoc);
ExprResult ActOnRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound,
Expr *UpperBound, SourceLocation RParenLoc);
ExprResult CreateRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound,
Expr *UpperBound,
RelativeBoundsClause *Relative,
SourceLocation RParenLoc);
ExprResult ActOnBoundsInteropType(SourceLocation TypeKWLoc, ParsedType Ty,
SourceLocation RParenLoc);
ExprResult CreateBoundsInteropTypeExpr(SourceLocation TypeKWLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc);
ExprResult CreatePositionalParameterExpr(unsigned Index, QualType QT);
RelativeBoundsClause* ActOnRelativeTypeBoundsClause(SourceLocation BoundsKWLoc,
ParsedType Ty,
SourceLocation RParenLoc);
RelativeBoundsClause *
CreateRelativeTypeBoundsClause(SourceLocation BoundsKWLoc,
TypeSourceInfo *TyInfo,
SourceLocation RParenLoc);
RelativeBoundsClause* ActOnRelativeConstExprClause(Expr *ConstExpr,
SourceLocation BoundsKWLoc,
SourceLocation RParenLoc);
bool CheckBoundsCastBaseType(Expr *E1);
ExprResult
ActOnBoundsCastExprBounds(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind,
SourceLocation LAngleBracketLoc, ParsedType D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc, SourceLocation RParenLoc,
Expr *E1, BoundsExpr *ParsedBounds);
ExprResult ActOnBoundsCastExprSingle(
Scope *S, SourceLocation OpLoc, tok::TokenKind Kind,
SourceLocation LAngleBracketLoc, ParsedType D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E1);
ExprResult BuildBoundsCastExpr(SourceLocation OpLoc, tok::TokenKind Kind,
TypeSourceInfo *CastTypeInfo,
SourceRange AngleBrackets,
SourceRange Paren, Expr *E1,
BoundsExpr *bounds);
bool DiagnoseBoundsDeclType(QualType Ty, DeclaratorDecl *D,
BoundsAnnotations &BA, bool IsReturnAnnots);
/// \brief Update the information in ASTContext that tracks, for a member,
/// which bounds declarations depend upon it. FD is the member whose
/// bounds are given by Bounds.
void TrackMemberBoundsDependences(FieldDecl *FD, BoundsExpr *Bounds);
void ActOnBoundsDecl(DeclaratorDecl *D, BoundsAnnotations Annots,
bool MergeDeferredBounds = false);
void ActOnEmptyBoundsDecl(DeclaratorDecl *D);
void ActOnInvalidBoundsDecl(DeclaratorDecl *D);
/// \brief Add default bounds/interop type expressions to Annots, if appropriate.
void InferBoundsAnnots(QualType Ty, BoundsAnnotations &Annots, bool IsParam);
// \#pragma CHECKED_SCOPE.
enum PragmaCheckedScopeKind {
PCSK_On,
PCSK_Off,
PCSK_BoundsOnly,
PCSK_Push,
PCSK_Pop
};
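// Illustrative source forms (spellings assumed from the enumerator names):
//
//   #pragma CHECKED_SCOPE on           // PCSK_On
//   #pragma CHECKED_SCOPE off          // PCSK_Off
//   #pragma CHECKED_SCOPE bounds_only  // PCSK_BoundsOnly
//   #pragma CHECKED_SCOPE push         // PCSK_Push
//   #pragma CHECKED_SCOPE pop          // PCSK_Pop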
void ActOnPragmaCheckedScope(PragmaCheckedScopeKind Kind, SourceLocation Loc);
void DiagnoseUnterminatedPragmaCheckedScopePush();
BoundsExpr *CreateInvalidBoundsExpr();
/// \brief Synthesize the interop type expression implied by the presence
/// of a bounds expression. Ty is the original unchecked type. Returns null
/// if none exists.
InteropTypeExpr *SynthesizeInteropTypeExpr(QualType Ty, bool IsParam);
BoundsExpr *CreateCountForArrayType(QualType QT);
// _Return_value in Checked C bounds expressions.
ExprResult ActOnReturnValueExpr(SourceLocation Loc);
/// \brief When non-NULL, the type of the '_Return_value' expression.
QualType BoundsExprReturnValue;
/// \brief RAII object used to temporarily set the type of _Return_value.
class CheckedCReturnValueRAII {
Sema &S;
QualType OldReturnValue;
public:
CheckedCReturnValueRAII(Sema &S, QualType ReturnVal) : S(S) {
OldReturnValue = S.BoundsExprReturnValue;
S.BoundsExprReturnValue = ReturnVal;
}
~CheckedCReturnValueRAII() {
S.BoundsExprReturnValue = OldReturnValue;
}
};
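// Illustrative sketch (assumed usage): making _Return_value carry the
// enclosing function's return type while checking its return bounds.
//
//   {
//     Sema::CheckedCReturnValueRAII RV(SemaRef, FD->getReturnType());
//     // ... check bounds annotations that mention _Return_value ...
//   } // BoundsExprReturnValue is restored here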
typedef bool
(*ParseDeferredBoundsCallBackFn)(void *P,
std::unique_ptr<CachedTokens> Toks,
ArrayRef<ParmVarDecl *> Params,
BoundsAnnotations &Result,
const Declarator &D);
void SetDeferredBoundsCallBack(void *OpaqueData, ParseDeferredBoundsCallBackFn p);
ParseDeferredBoundsCallBackFn DeferredBoundsParser;
void *DeferredBoundsParserData;
// Represents the context where an expression must be non-modifying.
enum NonModifyingContext {
NMC_Unknown,
NMC_Dynamic_Check,
NMC_Count, // Bounds count expression.
NMC_Byte_Count, // Bounds byte count expression.
NMC_Range, // Bounds range expression.
NMC_Function_Return, // Argument for parameter used in function
// return bounds.
NMC_Function_Parameter // Argument for parameter used in function
// parameter bounds.
};
/// \brief The kind of diagnostic to emit when an expression that must be
/// non-modifying turns out to be modifying.
enum NonModifyingMessage {
NMM_None,
NMM_Error,
NMM_Note
};
/// \brief Checks whether an expression is non-modifying
/// (see Checked C Spec, 3.6.1). Returns true if the expression is non-modifying,
/// false otherwise.
bool CheckIsNonModifying(Expr *E, NonModifyingContext Req =
NonModifyingContext::NMC_Unknown,
NonModifyingMessage = NMM_Error);
BoundsExpr *CheckNonModifyingBounds(BoundsExpr *Bounds, Expr *E);
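// Illustrative examples of the non-modifying requirement (Checked C
// syntax; not part of the original header):
//
//   _Array_ptr<int> p : count(n);    // OK: `n` is non-modifying
//   _Array_ptr<int> q : count(n++);  // rejected: `n++` modifies `n`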
ExprResult ActOnFunctionTypeApplication(ExprResult TypeFunc, SourceLocation Loc, ArrayRef<TypeArgument> Args);
RecordDecl *ActOnRecordTypeApplication(RecordDecl *Base, ArrayRef<TypeArgument> TypeArgs);
const ExistentialType *ActOnExistentialType(ASTContext &Context, const Type *TypeVar, QualType InnerType);
/// Complete a delayed type application by populating the record's fields with the right types.
/// Should only be called once per delayed 'RecordDecl'.
void CompleteTypeAppFields(RecordDecl *Incomplete);
// Determine whether the given 'RecordDecl' is part of an 'expanding cycle'.
// Generic records that form part of an expanding cycle can't be instantiated because they
// produce an infinite number of type applications (because we construct the transitive closure
// of type applications eagerly).
//
// Consider the graph of type parameter dependencies as defined below. An expanding cycle
// is a cycle in the graph that contains at least one expanding edge.
//
// We show how the graph is built via an example. Suppose we have three generic structs A<T>, B<U>, C<V>:
//
// struct A _For_any(T) { struct A<T>* a; struct B<T> *b; }
// struct B _For_any(U) { struct C<struct C<U> > *c; }
// struct C _For_any(V) { struct A<V>* a; }
//
// The vertices of the graph are T, U, and V (the type parameters, alpha-renamed if needed).
// There is an edge between nodes N1 and N2 if N2 is used in a field anywhere in the position of N1.
// If N2 appears at the "top-level" replacing N1, then the resulting edge is "non-expanding".
// Otherwise, if N2 appears nested within the argument that replaces N1, then the edge is "expanding".
//
// In our example the edges are:
//
// non-expanding: T -> T, T -> U, V -> T, U -> V
// expanding: U => V
//
// T -> U, U => V, V -> T is an expanding cycle because it contains the expanding edge U => V
//
// The cycle will be detected when C is processed (because C is defined last). If we tried to instantiate C, we would
// end up performing the following type applications:
// A<V>, B<V>, C<C<V>>, A<C<V>>, B<C<V>>, C<C<C<V>>>, ...
//
// The definition of expanding cycle is adapted from the 'ECMA 335 Common Language Infrastructure (CLI) Partitions I to VI' standard.
// Specifically, Partition II, section II.9.2 'Generics and recursive inheritance graphs'.
bool DiagnoseExpandingCycles(RecordDecl *Base, SourceLocation Loc);
QualType SubstituteTypeArgs(QualType QT, ArrayRef<TypeArgument> TypeArgs);
std::vector<const TypedefNameDecl *> FindFreeVariableDecls(QualType T);
bool AbstractForFunctionType(BoundsAnnotations &BA,
ArrayRef<DeclaratorChunk::ParamInfo> Params);
/// \brief Take a bounds expression with positional parameters from a function
/// type and substitute DeclRefs to the corresponding parameters in Params.
BoundsExpr *ConcretizeFromFunctionType(BoundsExpr *Expr,
ArrayRef<ParmVarDecl *> Params);
/// \brief Take a member bounds expression with member references and
/// replace the member references with member access expressions using
/// MemberBase as the base. Returns a nullptr if there is an error.
BoundsExpr *MakeMemberBoundsConcrete(Expr *MemberBase, bool IsArrow,
BoundsExpr *Bounds);
BoundsExpr *ConcretizeFromFunctionTypeWithArgs(BoundsExpr *Bounds, ArrayRef<Expr *> Args,
NonModifyingContext ErrorKind);
/// ConvertToFullyCheckedType: convert an expression E to a fully checked type. This
/// is used to retype declrefs and member exprs in checked scopes with bounds-safe
/// interfaces. The Checked C spec says that such uses in checked scopes shall be
/// treated as having "checked type".
ExprResult ConvertToFullyCheckedType(Expr *E, InteropTypeExpr *BA, bool IsParamUse,
ExprValueKind VK);
/// GetArrayPtrDereference - determine if an lvalue expression is a
/// dereference of an _Array_ptr or _Nt_array_ptr (via '*' or an array
/// subscript operator). If it is, return the actual dereference expression
/// and set Result to the pointer type being dereferenced. Otherwise, return
/// null.
Expr *GetArrayPtrDereference(Expr *E, QualType &Result);
/// MakeAssignmentImplicitCastExplicit: E has had assignment conversion rules
/// applied to it. If an implicit cast has been introduced because of the
/// assignment conversion rules, replace it with an explicit cast.
/// This allows us to substitute E into other operator expressions without
/// worrying about the different implicit conversion rules between assignments
/// and other operators. Sema tree rewriting assumes that semantic
/// analysis will recreate implicit casts. That doesn't happen properly if
/// E is taken from an assignment expression and used in another operator
/// expression.
Expr *MakeAssignmentImplicitCastExplicit(Expr *E);
enum BoundsDeclarationCheck {
BDC_Assignment,
BDC_Initialization
};
/// \brief Check that an address-of operation is not taking the
/// address of members used in bounds.
void CheckAddressTakenMembers(UnaryOperator *AddrOf);
/// \brief Check whether E contains a return value expression.
bool ContainsReturnValueExpr(Expr *E);
/// \brief Wrap a call expression in a Checked C temporary binding
/// expression, if a temporary is needed to describe the bounds
/// of the result of the call expression.
ExprResult CreateTemporaryForCallIfNeeded(ExprResult R);
/// CheckFunctionBodyBoundsDecls - check bounds declarations within a function
/// body.
void CheckFunctionBodyBoundsDecls(FunctionDecl *FD, Stmt *Body);
/// CheckTopLevelBoundsDecls - check bounds declarations for variable declarations
/// not within a function body.
void CheckTopLevelBoundsDecls(VarDecl *VD);
// WarnDynamicCheckAlwaysFails - Adds a warning if an explicit dynamic check
// will always fail.
void WarnDynamicCheckAlwaysFails(const Expr *Condition);
// If the VarDecl D has a byte_count or count bounds expression,
// NormalizeBounds expands it to a range bounds expression. The expanded
// range bounds are attached to the VarDecl D to avoid recomputing the
// normalized bounds for D.
BoundsExpr *NormalizeBounds(const VarDecl *D);
// This is a wrapper around CheckBoundsDeclaration::ExpandToRange. This
// provides an easy way to invoke this function from outside the class. Given
// a byte_count or count bounds expression for the VarDecl D, ExpandToRange
// will expand it to a range bounds expression.
BoundsExpr *ExpandBoundsToRange(const VarDecl *D, const BoundsExpr *B);
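// Expansion sketch (illustrative; see the Checked C specification for the
// normative rules): for a declaration
//
//   _Array_ptr<int> p : count(n);
//
// the normalized form attached to the VarDecl is the range bounds
//
//   _Array_ptr<int> p : bounds(p, p + n);
//
// and byte_count(n) similarly expands to a range whose endpoints are
// computed in bytes.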
//
// Track variables that in-scope bounds declarations depend upon.
// TODO: generalize this to other lvalue expressions.
class BoundsDependencyTracker {
public:
typedef SmallVector<VarDecl *, 2> VarBoundsDecls;
typedef VarBoundsDecls::iterator VarBoundsIterator;
typedef llvm::iterator_range<VarBoundsIterator> VarBoundsIteratorRange;
// mapping from variables to bounds that depend upon the variables.
typedef std::map<VarDecl *, VarBoundsDecls> DependentMap;
private:
// Map variables to the bounds declarations that are
// in scope and depend upon them.
DependentMap Map;
// Track the bounds that are in scope so that we can remove them from the
// dependent map when the scope is exited.
std::vector<VarDecl *> BoundsInScope;
public:
BoundsDependencyTracker() {}
// Call these when entering/exiting scopes so that we can track when
// variables go out of scope. EnterScope returns an integer
// that should be passed to the corresponding ExitScope call.
unsigned EnterScope();
void ExitScope(unsigned scopeBegin);
// If D has a bounds declaration, add its dependencies to the existing
// scope.
void Add(VarDecl *D);
VarBoundsIteratorRange DependentBoundsDecls(VarDecl *D) {
auto Iter = Map.find(D);
if (Iter == Map.end())
return VarBoundsIteratorRange(nullptr, nullptr);
return VarBoundsIteratorRange(Iter->second.begin(),Iter->second.end());
}
void Dump(raw_ostream &OS);
};
BoundsDependencyTracker BoundsDependencies;
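// Usage sketch (hypothetical call sites): scopes nest in LIFO order, and each
// ExitScope call must receive the value returned by the matching EnterScope.
// DeclsInScope is an assumed name for the declarations of the current scope.
//
//   unsigned Mark = BoundsDependencies.EnterScope();
//   for (VarDecl *VD : DeclsInScope)
//     BoundsDependencies.Add(VD);        // records any bounds dependencies
//   ...
//   BoundsDependencies.ExitScope(Mark);  // drops this scope's entries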
// Map expressions that modify lvalues (assignments and pre/post
// increment/decrement operations) to bounds that may depend on the modified
// lvalues. We check the validity of bounds declarations after
// expression statements using data flow analysis. During the analysis,
// we need to know whether an expression modifies an lvalue involved in a
// bounds invariant. The AST traversal order for determining this is lexical
// and conflicts with preferred orderings for dataflow analysis, so we
// precompute this information before analyzing a function body.
class ModifiedBoundsDependencies {
public:
// A C lvalue expression with bounds on values stored in the lvalue.
// It is either a variable or a member expression.
struct LValueWithBounds {
LValueWithBounds(llvm::PointerUnion<VarDecl *, MemberExpr *> Target,
BoundsExpr *Bounds) : Target(Target), Bounds(Bounds) {}
llvm::PointerUnion<VarDecl *, MemberExpr *> Target;
BoundsExpr *Bounds; // Bounds for target.
};
typedef SmallVector<LValueWithBounds,2> LValuesWithBounds;
// Map assignments or pre/post increment/decrement expressions to bounds
// that depend upon the lvalue modified by the expressions.
typedef std::map<Expr *, LValuesWithBounds> DependentBounds;
void Add(Expr *E, llvm::PointerUnion<VarDecl *, MemberExpr *> LValue,
BoundsExpr *Bounds);
void Dump(raw_ostream &OS);
ModifiedBoundsDependencies() {}
DependentBounds Tracker;
};
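// Usage sketch (hypothetical values): before analyzing a function body,
// record that assignment E modifies variable V, on which the bounds
// declaration B depends.
//
//   ModifiedBoundsDependencies Deps;
//   Deps.Add(E, V, B);        // E : Expr*, V : VarDecl*, B : BoundsExpr*
//   Deps.Dump(llvm::errs());  // inspect the computed dependencies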
/// \brief Compute a mapping from statements that modify lvalues to
/// in-scope bounds declarations that depend on those lvalues.
/// FD is the function being declared and Body is the body of the
/// function. They are passed in separately because Body hasn't
/// been attached to FD yet.
void ComputeBoundsDependencies(ModifiedBoundsDependencies &Tracker,
FunctionDecl *FD, Stmt *Body);
/// \brief RAII class used to indicate that we are substituting an expression
/// into another expression during bounds checking. We need to suppress
/// diagnostics emission during this. We are doing type-preserving
/// substitutions, so we don't expect semantic errors during substitution.
/// There could be warnings, which would confuse users. The warnings could
/// also be escalated to errors, which would cause compilation failures.
class ExprSubstitutionScope {
Sema &SemaRef;
bool PrevDisableSubstitionDiagnostics;
public:
explicit ExprSubstitutionScope(Sema &SemaRef,
bool DisableDiagnostics = true)
: SemaRef(SemaRef),
PrevDisableSubstitionDiagnostics(
SemaRef.DisableSubstitionDiagnostics) {
SemaRef.DisableSubstitionDiagnostics = DisableDiagnostics;
}
~ExprSubstitutionScope() {
SemaRef.DisableSubstitionDiagnostics =
PrevDisableSubstitionDiagnostics;
}
};
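// Usage sketch: substitution diagnostics are suppressed for the lifetime of
// the scope object and the previous state is restored on destruction, so
// scopes nest safely. SubstituteIntoBounds is an assumed name.
//
//   {
//     ExprSubstitutionScope Scope(*this);        // diagnostics off
//     ExprResult R = SubstituteIntoBounds(...);  // no warnings escape
//   }                                            // prior state restored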
bool DisableSubstitionDiagnostics;
ExprResult ActOnPackExpression(Expr *PackedExpr,
QualType ExistType,
TypeArgument SubstArg,
SourceLocation StartLoc,
SourceLocation EndLoc);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
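// Usage sketch (hypothetical loop): fold every special member invoked by the
// implicit definition into the specification, then read off the result.
// CalledMembers and Loc are assumed names.
//
//   ImplicitExceptionSpecification Spec(*this);
//   for (const CXXMethodDecl *M : CalledMembers)
//     Spec.CalledDecl(Loc, M);   // widens the computed specification
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();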
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
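// Usage sketch: temporarily permit 'this', e.g. while processing a
// late-parsed exception specification outside the member function body.
// S, ClassDecl, and Loc are assumed to be in scope.
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(S, ClassDecl, Qualifiers());
//     ExprResult This = S.ActOnCXXThis(Loc);  // allowed inside the scope
//   }  // 'this' handling reverts here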
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *
startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange,
TypeSourceInfo *MethodType, SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Optional<std::pair<unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
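/// The static_assert routines below act on declarations such as:
///
/// \code
/// static_assert(sizeof(void *) >= 4, "pointers are too small");
/// \endcode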
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
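/// For example, the following derived-to-base conversion is ambiguous, and
/// getAmbiguousPathsDisplayString (below) renders the candidate paths:
///
/// \code
/// struct A {};
/// struct B : A {};
/// struct C : A {};
/// struct D : B, C {}; // D -> A is ambiguous: via B or via C
/// \endcode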
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
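/// For example:
///
/// \code
/// struct B { virtual B *clone(); };
/// struct D : B { D *clone() override; }; // covariant return type: OK
/// \endcode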
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec of the overriding function is a subset of the base function's spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
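/// For example:
///
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); }; // error: 'f' overrides a 'final' function
/// \endcode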
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
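/// For example, the abstract-type checks below reject:
///
/// \code
/// struct Shape { virtual void draw() = 0; };
/// Shape s;  // error: variable of abstract class type (AbstractVariableType)
/// Shape *p; // OK: pointer to an abstract class
/// \endcode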
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
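/// For example, 'pair' names a deduction guide in:
///
/// \code
/// template<typename T, typename U> struct pair;
/// template<typename T, typename U> pair(T, U) -> pair<T, U>;
/// \endcode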
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
ConceptDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
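/// For example:
///
/// \code
/// template<typename T> void f() {
///   typename T::type x; // 'typename' required: 'T::type' is dependent
/// }
/// \endcode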
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
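/// For example, ActOnConceptDefinition is invoked for a definition such as:
///
/// \code
/// template<typename T>
/// concept Small = sizeof(T) <= sizeof(void *);
/// \endcode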
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
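/// For example, the routines below diagnose the unexpanded pack 'Ts' in
/// (assuming \c <tuple> is available):
///
/// \code
/// template<typename ...Ts> struct X {
///   std::tuple<Ts> bad;   // error: contains unexpanded parameter pack 'Ts'
///   std::tuple<Ts...> ok; // OK: pack expansion
/// };
/// \endcode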
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
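/// For example:
///
/// \code
/// template<typename ...Ts>
/// constexpr unsigned N = sizeof...(Ts); // computable without expanding Ts
/// \endcode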
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute \p Replacement for \p auto in \p TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
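/// For example, DeduceAutoType performs the deduction in:
///
/// \code
/// auto x = 42;       // deduces 'int'
/// const auto &r = x; // deduces 'const int &'
/// \endcode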
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
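/// A usage sketch (illustrative only; assumes a \c Sema reference named
/// \c SemaRef is in scope):
///
/// \code
/// {
///   ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, 2);
///   // substitutions here use the third element of each parameter pack
/// } // the previous index is restored when SubstIndex goes out of scope
/// \endcode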
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
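// Editorial usage sketch (assumes it runs inside a Sema member function):
//
//   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return true;  // depth limit exceeded; an error was already emitted
//   if (Inst.isAlreadyInstantiating())
//     return false; // this specialization is already on the stack
//   // ... perform the instantiation; the destructor pops the context ...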
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-null, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
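// Editorial sketch of querying the SFINAE context:
//
//   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
//     // We are in a SFINAE context; *Info may be null when the trap was
//     // set up outside of template argument deduction.
//   }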
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
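// Editorial sketch of the usual SFINAETrap pattern; the work done inside
// the trap is a placeholder:
//
//   Sema::SFINAETrap Trap(SemaRef);
//   // ... run provisional checks whose diagnostics must not escape ...
//   if (Trap.hasErrorOccurred())
//     return true; // treat as a substitution failure, not a hard error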
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
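// Editorial sketch: probe whether a construct would be valid without
// committing to diagnostics or typo correction.
//
//   {
//     TentativeAnalysisScope Tentative(*this);
//     // ... analyze the construct provisionally ...
//   } // typo-correction state is restored here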
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
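// Editorial sketch ('DoWork' is a hypothetical placeholder, e.g. parsing
// a function body):
//
//   {
//     GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
//     DoWork();
//     GlobalInstantiations.perform(); // define vtables, flush the queue
//   } // the destructor swaps the saved queues back in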
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
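// Editorial sketch, analogous to the global scope above: instantiations
// queued while the scope is active run before the enclosing scope ends.
//
//   {
//     LocalEagerInstantiationScope LocalInstantiations(S);
//     // ... members of local classes get queued here ...
//     LocalInstantiations.perform(); // run the local-only queue
//   }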
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index. Indices
/// must be set in strictly increasing order; any skipped slots are filled
/// with default-constructed ExtParameterInfos.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
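// Editorial sketch for ExtParameterInfoBuilder; the result is typically
// stored into a FunctionProtoType::ExtProtoInfo (assumed here as 'EPI'):
//
//   Sema::ExtParameterInfoBuilder Builder;
//   Builder.set(2, Info);                // slots 0 and 1 default-filled
//   EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);
/// Perform any deferred implicit template instantiations, optionally
/// restricted to the pending local instantiations only.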
void PerformPendingInstantiations(bool LocalOnly = false);
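/// Perform substitution on the type \p T with the given set of template
/// arguments; returns a null pointer on error.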
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
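// For example: '[super init]' is an ObjCSuperMessage, '[myObject foo]' is
// an ObjCInstanceMessage, and '[NSString string]' is an ObjCClassMessage.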
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
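// Supported spellings include, e.g., '#pragma pack(8)',
// '#pragma pack(push, label, 4)', and '#pragma pack(pop)'.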
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility...
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check whether the function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the names of
/// those extensions.
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check whether the function type \p FT is associated with any extensions
/// present in OpenCLTypeExtMap and, if so, return the names of those
/// extensions.
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in the appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
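// Editorial sketch ('S' is the current Sema, 'Ctx' its ASTContext): tie
// the OpenCL double type to the cl_khr_fp64 extension.
//
//   S.setCurrentOpenCLExtension("cl_khr_fp64");
//   S.setOpenCLExtensionForType(Ctx.DoubleTy, "cl_khr_fp64");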
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If the current OpenCL extension
/// is empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If the current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if the declaration is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Check whether the expression \p E is allowed to be used in expressions
/// for OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// Try to capture a lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on a correct id-expression from the '#pragma omp
/// threadprivate' directive.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the OpenMP 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on a correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true if inside an OpenMP 'declare target' region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true if inside an OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of the captured region for an OpenMP directive.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
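// The defaulted parameters above let callers handle both the bare 'ordered'
// clause and 'ordered(n)'. A minimal sketch (locations hypothetical):
//
//   OMPClause *Bare = ActOnOpenMPOrderedClause(StartLoc, EndLoc);
//   OMPClause *WithArg =
//       ActOnOpenMPOrderedClause(StartLoc, EndLoc, LParenLoc, NumForLoopsExpr);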
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style or functional cast (e.g., a static_cast).
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result of the cast has the value kind given by VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion,
bool isBoundsSafeInterfaceCast = false);
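// A minimal usage sketch for ImpCastExprToType, assuming a Sema instance 'S',
// an expression 'E', and an ASTContext 'Ctx' (all hypothetical here):
//
//   ExprResult Res = S.ImpCastExprToType(E, Ctx.IntTy, CK_IntegralCast);
//   if (Res.isInvalid())
//     return ExprError();
//   E = Res.get(); // E now carries the (possibly merged) implicit cast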
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
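// Illustrative flow for promoting a trailing variadic argument; a sketch
// only, with 'S', 'Fn', 'Arg', 'FDecl', and 'Proto' standing in for caller
// state:
//
//   VariadicCallType CallType = S.getVariadicCallType(FDecl, Proto, Fn);
//   if (CallType != VariadicDoesNotApply) {
//     ExprResult Promoted =
//         S.DefaultVariadicArgumentPromotion(Arg, CallType, FDecl);
//     if (Promoted.isInvalid())
//       return true; // diagnostics were already emitted
//     Arg = Promoted.get();
//   }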
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
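// Sketch of a typical binary-operator check built on this routine; 'Loc',
// 'LHS', and 'RHS' are hypothetical ExprResults held by the caller:
//
//   QualType ResTy = UsualArithmeticConversions(LHS, RHS);
//   if (LHS.isInvalid() || RHS.isInvalid())
//     return QualType();
//   if (ResTy.isNull() || !ResTy->isArithmeticType())
//     return InvalidOperands(Loc, LHS, RHS); // emits the error diagnostic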
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// IncompatibleCheckedCVoid - Assignments to/from void pointers to pointers
/// to data containing checked pointers are not allowed in regular checked
/// scopes. They are allowed only in unchecked and checked bounds_only scopes.
IncompatibleCheckedCVoid,
/// Incompatible - We reject this conversion outright; it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true,
QualType LHSInteropType = QualType());
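// A hedged sketch of the assignment-checking flow these declarations imply;
// 'S', 'LHSTy', 'RHS', and 'Loc' are hypothetical caller state:
//
//   ExprResult RHSRes = RHS;
//   Sema::AssignConvertType ConvTy =
//       S.CheckSingleAssignmentConstraints(LHSTy, RHSRes);
//   if (RHSRes.isInvalid())
//     return ExprError();
//   if (S.DiagnoseAssignmentResult(ConvTy, Loc, LHSTy,
//                                  RHSRes.get()->getType(), RHSRes.get(),
//                                  Sema::AA_Assigning))
//     return ExprError(); // hard error; accepted extensions return false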
public:
/// \brief Given a value with type Ty that has a bounds declaration,
/// compute the bounds-safe interface type. Returns a null QualType
/// if none exists.
QualType SynthesizeInteropType(QualType Ty, bool isParam);
/// Rewrite function types with bounds-safe interfaces on unchecked
/// types to use the checked types specified by the interfaces. Recursively
/// apply the rewrite to function types nested within the type.
QualType RewriteBoundsSafeInterfaceTypes(QualType Ty);
/// \brief Get the bounds-safe interface type for LHS.
/// Returns a null QualType if there isn't one.
QualType GetCheckedCLValueInteropType(ExprResult LHS);
/// \brief Get the bounds-safe interface type for RHS.
/// Returns a null QualType if there isn't one.
QualType GetCheckedCRValueInteropType(ExprResult RHS);
/// \brief If T is an array type, create a checked array type version of T.
/// This includes propagating the checked property to nested array types. If
/// a valid checked array type cannot be constructed and Diagnose is true,
/// print a diagnostic message for the problem.
QualType MakeCheckedArrayType(QualType T, bool Diagnose = false,
SourceLocation Loc = SourceLocation());
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods return a valid/converted QualType or a null
/// QualType (indicating that an error diagnostic was issued).
///
/// Type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
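// Example use (a sketch): computing the composite pointer type for the two
// branches of a conditional operator, via the ExprResult convenience
// overload above ('QuestionLoc', 'LHS', and 'RHS' are hypothetical):
//
//   QualType Composite = FindCompositePointerType(QuestionLoc, LHS, RHS);
//   if (Composite.isNull())
//     return QualType(); // no composite type; the caller diagnoses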
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return an invalid ExprResult if there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
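// Minimal sketch of condition handling when acting on an 'if' statement;
// 'S', 'CurScope', 'IfLoc', and 'CondExpr' are hypothetical caller state:
//
//   Sema::ConditionResult Cond = S.ActOnCondition(
//       CurScope, IfLoc, CondExpr, Sema::ConditionKind::Boolean);
//   if (Cond.isInvalid())
//     return StmtError();
//   // Cond.get() yields the condition variable (if any) plus the expression.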
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Checks conversion to bool; returns an invalid
/// ExprResult if the conversion fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid ExprResult on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
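// Usage sketch: validating an ICE and retrieving its folded value ('S' and
// 'E' are hypothetical; the DiagID overload above reports through the given
// diagnostic instead of the default one):
//
//   llvm::APSInt Value;
//   ExprResult ICE = S.VerifyIntegerConstantExpression(E, &Value);
//   if (ICE.isInvalid())
//     return ExprError();
//   // Value now holds the folded constant.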
/// VerifyBitField - Verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid ExprResult on failure.
/// Can optionally report (via ZeroWidth) whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs the address of \p Callee;
/// nullptr in the case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
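///
/// Illustrative examples (a sketch of the common cases, not an exhaustive
/// specification): a __host__ caller needing a __device__ callee is
/// CFP_Never; __device__ to __device__ is CFP_Native; a __host__ __device__
/// caller gets CFP_SameSide when the callee matches the current compilation
/// mode and CFP_WrongSide otherwise.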
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
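///
/// Example (illustrative): a call from a __host__ function to a __device__
/// function is CFP_Never and is diagnosed immediately, whereas the same
/// call from a __host__ __device__ function during host compilation is
/// CFP_WrongSide, so the diagnostic is deferred until we know the caller
/// is codegen'ed.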
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error, emits an appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
QualType BaseType, QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
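// Example (illustrative): completing inside "f(a, <point>" for
// void f(int, int) yields NumArgs == 1 with PartialOverloading == true;
// the argument being typed counts as one more, so we compare
// NumArgs + 1 == 2 against NumParams == 2 and report no excess.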
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is being converted to some pointer type T with
/// lower or equal alignment requirements. If so, it removes the expression
/// from the set. This is used when we do not want to diagnose such misaligned
/// access (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
/// \brief RAII object that handles state changes for processing a member
/// bounds expression.
class EnterMemberBoundsExprRAII {
Sema &S;
bool SavedMemberBounds;
public:
EnterMemberBoundsExprRAII(Sema &S)
: S(S), SavedMemberBounds(S.IsMemberBoundsExpr)
{
S.IsMemberBoundsExpr = true;
}
~EnterMemberBoundsExprRAII() {
S.IsMemberBoundsExpr = SavedMemberBounds;
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
NodeMapping.h |
/*****************************************************************************
*
* Copyright (c) 2003-2020 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014-2017 by Centre for Geoscience Computing (GeoComp)
* Development from 2019 by School of Earth and Environmental Sciences
**
*****************************************************************************/
/*
NodeMapping provides a mapping from the local nodes typically to the
degrees of freedom, the reduced degrees of freedom or the reduced node set.
*/
#ifndef __FINLEY_NODEMAPPING_H__
#define __FINLEY_NODEMAPPING_H__
#include "Util.h"
namespace finley {
struct NodeMapping {
/// resets both map and target.
void clear()
{
target.clear();
map.clear();
}
/// initializes a node mapping. The target array is copied and a reverse
/// map created.
/// theTarget[i]=unused means that no target is defined for FEM node i.
void assign(const std::vector<index_t>& theTarget, index_t unused)
{
if (theTarget.empty())
return;
std::pair<index_t,index_t> range(
util::getFlaggedMinMaxInt(theTarget.size(), &theTarget[0], unused));
if (range.first < 0) {
throw escript::ValueError("NodeMapping: target has negative entry.");
}
// now we assume min(target)=0!
const dim_t numTargets = range.first<=range.second ? range.second+1 : 0;
target.assign(theTarget.begin(), theTarget.end());
const index_t targetSize = target.size();
map.assign(numTargets, -1);
bool err = false;
#pragma omp parallel
{
#pragma omp for
for (index_t i=0; i<targetSize; ++i) {
if (target[i] != unused)
map[target[i]]=i;
}
// sanity check
#pragma omp for
for (index_t i=0; i<numTargets; ++i) {
if (map[i]==-1) {
#pragma omp critical
err=true;
}
}
}
if (err)
throw escript::ValueError("NodeMapping: target does not define a continuous labeling.");
}
/// returns the number of target nodes (number of items in the map array)
dim_t getNumTargets() const { return map.size(); }
/// target[i] defines the target of FEM node i=0,...,numNodes-1
std::vector<index_t> target;
/// maps the target nodes back to the FEM nodes: target[map[i]]=i
std::vector<index_t> map;
};
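// Minimal usage sketch (illustrative only; values are hypothetical):
//
//   NodeMapping dofMap;
//   // FEM nodes 0..3; node 2 carries no target (flag value -1):
//   std::vector<index_t> target{0, 2, -1, 1};
//   dofMap.assign(target, -1);
//   // dofMap.getNumTargets() == 3 and dofMap.map == {0, 3, 1},
//   // so dofMap.target[dofMap.map[i]] == i for each target i.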
} // namespace finley
#endif // __FINLEY_NODEMAPPING_H__
|
general_basis_op.h | #ifndef _GENERAL_BASIS_OP_H
#define _GENERAL_BASIS_OP_H
#include <iostream>
#include <complex>
#include <algorithm>
#include <limits>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "misc.h"
#include "openmp.h"
namespace basis_general {
template<class T>
int inline check_imag(std::complex<double> m,std::complex<T> *M){
M[0].real(m.real());
M[0].imag(m.imag());
return 0;
}
template<class T>
int inline check_imag(std::complex<double> m,T *M){
if(std::abs(m.imag())>1.1e-15){
return 1;
}
else{
M[0] = m.real();
return 0;
}
}
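// Illustrative: with T = double, check_imag(std::complex<double>(0.5, 0.0), &x)
// stores 0.5 in x and returns 0, whereas a matrix element with
// |imag| > 1.1e-15 returns 1 to signal that it cannot be represented in a
// real-valued output array; the complex-T overload never fails.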
template<class I, class J, class K, class T>
int general_op(general_basis_core<I> *B,
const int n_op,
const char opstr[],
const int indx[],
const std::complex<double> A,
const bool full_basis,
const npy_intp Ns,
const I basis[],
const J n[],
K row[],
K col[],
T M[]
)
{
int err = 0;
#pragma omp parallel
{
const int nt = B->get_nt();
const npy_intp chunk = std::max(Ns/(100*omp_get_num_threads()),(npy_intp)1);
int g[__GENERAL_BASIS_CORE__max_nt];
#pragma omp for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err != 0){
continue;
}
I r = basis[i];
std::complex<double> m = A;
int local_err = B->op(r,m,n_op,opstr,indx);
if(local_err == 0){
int sign = 1;
for(int k=0;k<nt;k++){
g[k]=0;
}
K j = i;
if(r != basis[i]){
I rr = B->ref_state(r,g,sign);
if(full_basis){
j = Ns - (npy_intp)rr - 1;
}
else{
j = binary_search(Ns,basis,rr);
}
}
if(j >= 0){
for(int k=0;k<nt;k++){
double q = (2.0*M_PI*B->qs[k]*g[k])/B->pers[k];
m *= std::exp(std::complex<double>(0,-q));
}
m *= sign * std::sqrt(double(n[j])/double(n[i]));
local_err = check_imag(m,&M[i]);
col[i]=i;
row[i]=j;
}
else{
col[i] = i;
row[i] = i;
M[i] = std::numeric_limits<T>::quiet_NaN();
}
}
if(local_err != 0){
#pragma omp critical
err = local_err;
}
}
}
return err;
}
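// Usage sketch (illustrative; argument values are hypothetical). The three
// output arrays form a COO (triplet) representation of the operator:
//
//   std::vector<npy_intp> row(Ns), col(Ns);
//   std::vector<std::complex<double>> M(Ns);
//   int err = general_op(B, n_op, opstr, indx, A, false, Ns, basis, n,
//                        row.data(), col.data(), M.data());
//   // row[i]/col[i]/M[i] give one matrix element per basis state; NaN
//   // entries mark states whose image falls outside the basis.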
template<class T>
int inline atomic_add(const std::complex<double> m,std::complex<T> *M){
T * M_v = reinterpret_cast<T*>(M);
const T m_real = m.real();
const T m_imag = m.imag();
#pragma omp atomic
M_v[0] += m_real;
#pragma omp atomic
M_v[1] += m_imag;
return 0;
}
template<class T>
int inline atomic_add(const std::complex<double> m,T *M){
if(std::abs(m.imag())>1.1e-15){
return 1;
}
else{
const T m_real = m.real();
#pragma omp atomic
M[0] += m_real;
return 0;
}
}
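// Illustrative: for complex output types atomic_add performs two scalar
// "#pragma omp atomic" updates (real part, then imaginary part). Concurrent
// writers may interleave between the two halves, but each half is free of
// data races, which is all the accumulation below requires.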
template<class I, class J, class K>
int general_inplace_op(general_basis_core<I> *B,
const bool conjugate,
const bool transpose,
const int n_op,
const char opstr[],
const int indx[],
const std::complex<double> A,
const bool full_basis,
const npy_intp Ns,
const npy_intp nvecs,
const I basis[],
const J n[],
const K v_in[],
K v_out[])
{
int err = 0;
#pragma omp parallel
{
const int nt = B->get_nt();
const npy_intp chunk = std::max(Ns/(100*omp_get_num_threads()),(npy_intp)1);
int g[__GENERAL_BASIS_CORE__max_nt];
#pragma omp for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err != 0){
continue;
}
I r = basis[i];
std::complex<double> m = A;
int local_err = B->op(r,m,n_op,opstr,indx);
if(local_err == 0){
int sign = 1;
npy_intp j = i;
if(r != basis[i]){
I rr = B->ref_state(r,g,sign);
if(full_basis){
j = Ns - (npy_intp)rr - 1;
}
else{
j = binary_search(Ns,basis,rr);
}
}
if(j >= 0){
for(int k=0;k<nt;k++){
double q = (2.0*M_PI*B->qs[k]*g[k])/B->pers[k];
m *= std::exp(std::complex<double>(0,-q));
}
m *= sign * std::sqrt(double(n[j])/double(n[i]));
if(transpose){
const K * v_in_col = v_in + j * nvecs;
K * v_out_row = v_out + i * nvecs;
if(conjugate){
for(int k=0;k<nvecs;k++){
const std::complex<double> ME = std::complex<double>(v_in_col[k]) * std::conj(m);
local_err = atomic_add(ME,&v_out_row[k]);
if(local_err){
break;
}
}
}
else{
for(int k=0;k<nvecs;k++){
const std::complex<double> ME = std::complex<double>(v_in_col[k]) * m;
local_err = atomic_add(ME,&v_out_row[k]);
if(local_err){
break;
}
}
}
}
else{
const K * v_in_col = v_in + i * nvecs;
K * v_out_row = v_out + j * nvecs;
if(conjugate){
for(int k=0;k<nvecs;k++){
const std::complex<double> ME = std::complex<double>(v_in_col[k]) * std::conj(m);
local_err = atomic_add(ME,&v_out_row[k]);
if(local_err){
break;
}
}
}
else{
for(int k=0;k<nvecs;k++){
const std::complex<double> ME = std::complex<double>(v_in_col[k]) * m;
local_err = atomic_add(ME,&v_out_row[k]);
if(local_err){
break;
}
}
}
}
}
}
if(local_err != 0){
#pragma omp critical
err = local_err;
}
}
}
return err;
}
template<class I, class T>
int general_op_bra_ket(general_basis_core<I> *B,
const int n_op,
const char opstr[],
const int indx[],
const std::complex<double> A,
const npy_intp Ns,
const I ket[], // col
I bra[], // row
T M[]
)
{
int err = 0;
#pragma omp parallel
{
const int nt = B->get_nt();
const npy_intp chunk = std::max(Ns/(100*omp_get_num_threads()),(npy_intp)1);
int g[__GENERAL_BASIS_CORE__max_nt];
#pragma omp for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err != 0){
continue;
}
std::complex<double> m = A;
const I s = ket[i];
I r = ket[i];
int local_err = B->op(r,m,n_op,opstr,indx);
if(local_err == 0){
int sign = 1;
if(r != s){ // off-diagonal matrix element
r = B->ref_state(r,g,sign);
// use check_state to determine if state is a representative (same routine as in make_general_basis)
double norm_r = B->check_state(r);
if(!check_nan(norm_r) && norm_r > 0){ // ref_state is a representative
for(int k=0;k<nt;k++){
double q = (2.0*M_PI*B->qs[k]*g[k])/B->pers[k];
m *= std::exp(std::complex<double>(0,-q));
}
double norm_s = B->check_state(s);
m *= sign * std::sqrt(norm_r/norm_s);
local_err = check_imag(m,&M[i]); // assigns value to M[i]
bra[i] = r;
}
else{ // ref_state is not a representative
M[i] = std::numeric_limits<T>::quiet_NaN();
bra[i] = s;
}
}
else{ // diagonal matrix element
m *= sign;
local_err = check_imag(m,&M[i]); // assigns value to M[i]
bra[i] = s;
}
}
if(local_err != 0){
#pragma omp critical
err = local_err;
}
}
}
return err;
}
template<class I, class T>
int general_op_bra_ket_pcon(general_basis_core<I> *B,
const int n_op,
const char opstr[],
const int indx[],
const std::complex<double> A,
const npy_intp Ns,
const std::set<std::vector<int>> &Np_set, // set of particle-conserving sectors
const I ket[], // col
I bra[], // row
T M[]
)
{
int err = 0;
#pragma omp parallel
{
const std::set<std::vector<int>> Np_set_local = Np_set;
const int nt = B->get_nt();
const npy_intp chunk = std::max(Ns/(100*omp_get_num_threads()),(npy_intp)1);
int g[__GENERAL_BASIS_CORE__max_nt];
#pragma omp for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err != 0){
continue;
}
std::complex<double> m = A;
const I s = ket[i];
I r = ket[i];
int local_err = B->op(r,m,n_op,opstr,indx);
if(local_err == 0){
int sign = 1;
if(r != s){ // off-diagonal matrix element
r = B->ref_state(r,g,sign);
bool pcon_bool = B->check_pcon(r,Np_set_local);
if(pcon_bool){ // reference state within same particle-number sector(s)
// use check_state to determine if state is a representative (same routine as in make_general_basis)
double norm_r = B->check_state(r);
if(!check_nan(norm_r) && norm_r > 0){ // ref_state is a representative
for(int k=0;k<nt;k++){
double q = (2.0*M_PI*B->qs[k]*g[k])/B->pers[k];
m *= std::exp(std::complex<double>(0,-q));
}
double norm_s = B->check_state(s);
m *= sign * std::sqrt(norm_r/norm_s);
local_err = check_imag(m,&M[i]); // assigns value to M[i]
bra[i] = r;
}
else{ // ref_state not a representative
M[i] = std::numeric_limits<T>::quiet_NaN();
bra[i] = s;
}
}
else{ // ref state in different particle number sector
M[i] = std::numeric_limits<T>::quiet_NaN();
bra[i] = s;
}
}
else{ // diagonal matrix element
for(int k=0;k<nt;k++){
double q = (2.0*M_PI*B->qs[k]*g[k])/B->pers[k];
m *= std::exp(std::complex<double>(0,-q));
}
m *= sign;
local_err = check_imag(m,&M[i]); // assigns value to M[i]
bra[i] = s;
}
}
if(local_err != 0){
#pragma omp critical
err = local_err;
}
}
}
return err;
}
}
#endif
|
convolution_1x1_packnto1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_packnto1_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
static void conv1x1s2_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
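// Stride-2 sampling: each output pixel advances r0 by 2 * packn floats, so a
// full output row consumes 2 * outw * packn. tailstep then skips the rest of
// the current input row plus the entire next (skipped) row, i.e.
// (w - 2 * outw) + w elements per lane.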
const int tailstep = (w - 2 * outw + w) * packn;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
vfloat32m1_t _val = vle32_v_f32m1(r0, vl);
vse32_v_f32m1(outptr, _val, vl);
r0 += packn * 2;
outptr += packn;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_packnto1_rvv(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
GB_unop__sinh_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sinh_fp32_fp32)
// op(A') function: GB (_unop_tran__sinh_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = sinhf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sinhf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = sinhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SINH || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__sinh_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = sinhf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = sinhf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__sinh_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
inline cudaDeviceProp cuda_get_device_prop() {
int device;
CUDA_CALL(cudaGetDevice(&device));
cudaDeviceProp deviceProp;
CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
return deviceProp;
}
/*!
* \brief Get the number of blocks for a CUDA kernel given N
*/
inline int cuda_get_num_blocks(const int N) {
using namespace mshadow::cuda;
return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
template<>
inline int get_num_threads<gpu>(const int N) {
using namespace mshadow::cuda;
return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif // __CUDACC__
template<>
inline int get_num_threads<cpu>(const int N) {
return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
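/*
 * Usage sketch (illustrative): dispatch on a runtime req value, e.g.
 *
 *   MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
 *     Kernel<op_with_req<OP, Req>, xpu>::Launch(s, out.Size(),
 *                                               out.dptr<DType>(),
 *                                               in.dptr<DType>());
 *   });
 */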
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << " too large"; \
}
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
/*!
* \brief assign the val to out according
* to request in Kernel::Launch
* \param out the data to be assigned
* \param req the assignment request
* \param val the value to be assigned to out
* \tparam OType output type
* \tparam VType value type
*/
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
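/*
 * Illustrative expansion: KERNEL_ASSIGN(out[i], kAddTo, v) performs
 * out[i] += v; with kWriteTo or kWriteInplace it performs out[i] = v, and
 * with kNullOp it does nothing.
 */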
/* \brief Compute flattened index given coordinates and shape. */
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
int ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
}
return ret;
}
/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
Shape<ndim> ret;
#pragma unroll
for (int i = ndim-1, j = idx; i >=0; --i) {
int tmp = j / shape[i];
ret[i] = j - tmp*shape[i];
j = tmp;
}
return ret;
}
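/* Worked example (illustrative): for shape (2, 3), ravel of coord (1, 2) is
 * 1*3 + 2 = 5, and unravel(5, (2, 3)) recovers (1, 2); the two functions are
 * inverses for in-range coordinates. */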
/* Compute dot product of two vectors */
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
int ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret += coord[i] * stride[i];
}
return ret;
}
/* Combining unravel and dot */
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
const Shape<ndim>& stride) {
int ret = 0;
#pragma unroll
for (int i = ndim-1, j = idx; i >=0; --i) {
int tmp = j / shape[i];
ret += (j - tmp*shape[i])*stride[i];
j = tmp;
}
return ret;
}
/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
Shape<ndim> stride;
index_t cumprod = 1;
#pragma unroll
for (int i = ndim - 1; i >= 0; --i) {
stride[i] = (shape[i] > 1) ? cumprod : 0;
cumprod *= shape[i];
}
return stride;
}
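/* Worked example (illustrative): calc_stride((2, 1, 3)) yields (3, 0, 1);
 * broadcast axes (extent 1) get stride 0, so advancing along them re-reads
 * the same element. */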
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx, const Shape<ndim>& stride) {
++(*coord)[ndim-1];
*idx += stride[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx = *idx + stride[i-1] - shape[i] * stride[i];
}
}
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx1, const Shape<ndim>& stride1,
index_t* idx2, const Shape<ndim>& stride2) {
++(*coord)[ndim-1];
*idx1 += stride1[ndim-1];
*idx2 += stride2[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
*idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
}
}
/*!
* \brief Simple copy data from one blob to another
* \param to Destination blob
* \param from Source blob
*/
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
CHECK_EQ(from.Size(), to.Size());
CHECK_EQ(from.dev_mask(), to.dev_mask());
MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
if (to.type_flag_ == from.type_flag_) {
mshadow::Copy(to.FlatTo1D<xpu, DType>(), from.FlatTo1D<xpu, DType>(), s);
} else {
MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
})
}
})
}
/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
/* \brief Backward calc with grad
* \param a - output grad
* \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
* \return input grad
*/
template<typename DType, typename ...Args>
MSHADOW_XINLINE static DType Map(DType a, Args... args) {
return DType(a * GRAD_OP::Map(args...));
}
};
/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value
* Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
*/
template<typename OP, int req>
struct op_with_req {
typedef OP Operation;
/*! \brief input is one tensor */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is a tensor and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief input is a tensor and two scalar values */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in,
const DType value_1, const DType value_2) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
}
/*! \brief No inputs (i.e. fill with a constant value) */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out) {
KERNEL_ASSIGN(out[i], req, OP::Map());
}
/*! \brief input is a single scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(value));
}
/*! \brief inputs are two tensors and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out,
const DType *input_1, const DType *input_2, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
}
/*! \brief inputs are three tensors (i.e. backward grad with a binary grad function) */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out,
const DType *input_1,
const DType *input_2,
const DType *input_3) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
}
};
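/*
 * Usage sketch (illustrative; names follow the conventions in this file): an
 * elementwise binary op is typically launched as
 *
 *   Kernel<op_with_req<OP, kWriteTo>, cpu>::Launch(s, N, out, lhs, rhs);
 *
 * which evaluates out[i] = OP::Map(lhs[i], rhs[i]) for each i, with the
 * request type fixed at compile time.
 */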
template<typename OP, typename xpu>
struct Kernel;
/*!
* \brief CPU Kernel launcher
* \tparam OP Operator to launch
*/
template<typename OP>
struct Kernel<OP, cpu> {
/*!
* \brief Launch a generic CPU kernel.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename ...Args>
inline static bool Launch(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
return true;
}
/*!
* \brief Launch CPU kernel which has OMP tuning data available.
* When using this for a new kernel op, add declaration and tuning objects to
* operator_tune.cc
* \tparam PRIMITIVE_OP The primitive operation to use for tuning
* \tparam DType Data type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param dest Destination pointer (used to infer DType)
* \param args Varargs to eventually pass to the OP::Map() function
*/
template<typename PRIMITIVE_OP, typename DType, typename ...Args>
static void LaunchTuned(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
static_cast<size_t>(N), static_cast<size_t>(omp_threads))) {
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
}
/*!
* \brief Launch custom-tuned kernel where each thread is set to
* operate on a contiguous partition
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param N Number of iterations
* \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
*/
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<cpu> *s, const int N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
OP::Map(0, N, args...);
} else {
const int length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i += length) {
OP::Map(i, i + length > N ? N - i : length, args...);
}
}
#else
OP::Map(0, N, args...);
#endif
}
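/* Editorial note: unlike Launch(), LaunchEx() hands each thread a contiguous
 * partition, so OP::Map() must accept a (start, length) pair. A hypothetical
 * example:
 *
 *   struct iota_fill {
 *     template<typename DType>
 *     MSHADOW_XINLINE static void Map(int start, int length, DType *out) {
 *       for (int i = start; i < start + length; ++i)
 *         out[i] = DType(i);  // fill the assigned partition
 *     }
 *   };
 *   // Kernel<iota_fill, cpu>::LaunchEx(s, N, out_ptr);
 */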
/*!
* \brief Launch a tunable OP with implicitly-supplied data type
* \tparam DType Data type
* \tparam T OP type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const int N, DType *dest, Args... args) {
LaunchTuned<T, DType>(s, N, dest, args...);
return true;
}
/*!
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (i.e. op_with_req)
* \tparam DType Data type
* \tparam T Wrapper type
* \tparam Args Varargs type to eventually pass to the OP::Map() function
* \param s Stream (usually null for CPU)
* \param N Number of iterations
* \param args Varargs to eventually pass to the OP::Map() function
* \return Always true
*/
template<typename DType, typename T = OP, typename ...Args>
static MSHADOW_CINLINE
typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
Launch(mshadow::Stream<cpu> *s, const int N, DType *dest, Args... args) {
LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
return true;
}
};
#ifdef __CUDACC__
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, args...);
}
}
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, 1, args...);
}
}
template<typename OP>
struct Kernel<OP, gpu> {
/*! \brief Launch GPU kernel */
template<typename ...Args>
inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
}
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel_ex<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
}
};
#endif // __CUDACC__
/*!
* \brief Set to immediate scalar value kernel
* \tparam val Scalar immediate
*/
template<int val>
struct set_to_int : public tunable {
// mxnet_op version (when used directly with Kernel<>::Launch())
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out) {
out[i] = DType(val);
}
// mshadow_op version (when used with op_with_req<>)
MSHADOW_XINLINE static int Map() {
return val;
}
};
/*!
* \brief Special-case kernel shortcut for setting to zero and one
*/
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
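/* Editorial usage sketch: filling a buffer of length N with zeros or ones via
 * the tunable Launch() overload above. `s` may be nullptr on CPU and `data`
 * is assumed to be a DType* of length N (both names are illustrative).
 *
 *   Kernel<set_zero, cpu>::Launch(s, N, data);
 *   Kernel<set_one, cpu>::Launch(s, N, data);
 */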
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
maxwell_zeroBC.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_sstruct_ls.h"
HYPRE_Int
hypre_ParVectorZeroBCValues(hypre_ParVector *v,
HYPRE_Int *rows,
HYPRE_Int nrows)
{
HYPRE_Int ierr = 0;
hypre_Vector *v_local = hypre_ParVectorLocalVector(v);
hypre_SeqVectorZeroBCValues(v_local, rows, nrows);
return ierr;
}
HYPRE_Int
hypre_SeqVectorZeroBCValues(hypre_Vector *v,
HYPRE_Int *rows,
HYPRE_Int nrows)
{
HYPRE_Real *vector_data = hypre_VectorData(v);
HYPRE_Int i;
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < nrows; i++)
vector_data[rows[i]] = 0.0;
return ierr;
}
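/* Editorial usage sketch: zero selected entries of the local part of a
   ParVector. The indices below are hypothetical; in practice `rows` holds
   local positions previously identified as boundary points.

     HYPRE_Int bc_rows[2] = {0, 41};
     hypre_ParVectorZeroBCValues(v, bc_rows, 2);
*/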
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2013 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
This is used for -Wc++-compat. */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For a CONSTRUCTOR, whether some initializer contains a
subexpression meaning it is not a constant expression. */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* Record parser information about an expression that is irrelevant
for code generation alongside a tree representing its value. */
struct c_expr
{
/* The value of the expression. */
tree value;
/* Record the original unary/binary operator of an expression, which may
have been changed by fold, STRING_CST for unparenthesized string
constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
(even if parenthesized), for subexpressions, and for non-constant
initializers, or ERROR_MARK for other expressions (including
parenthesized expressions). */
enum tree_code original_code;
/* If not NULL, the original type of an expression. This will
differ from the type of the value field for an enum constant.
The type of an enum constant is a plain integer type, but this
field will be the enum type. */
tree original_type;
};
/* Type alias for struct c_expr. This allows the structure to be used
inside the VEC types. */
typedef struct c_expr c_expr_t;
/* A varray of c_expr_t. */
/* Append a new c_expr_t element to V. */
#define C_EXPR_APPEND(V, ELEM) \
do { \
c_expr_t __elem = (ELEM); \
vec_safe_push (V, __elem); \
} while (0)
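/* Editorial usage sketch (hypothetical names): appending to a GC-allocated
   vector of expressions.

     vec<c_expr_t, va_gc> *exprs = NULL;
     c_expr_t e = ...;
     C_EXPR_APPEND (exprs, e);
*/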
/* A kind of type specifier. Note that this information is currently
only used to distinguish tag definitions, tag references and typeof
uses. */
enum c_typespec_kind {
/* No typespec. This appears only in struct c_declspec. */
ctsk_none,
/* A reserved keyword type specifier. */
ctsk_resword,
/* A reference to a tag, previously declared, such as "struct foo".
This includes where the previous declaration was as a different
kind of tag, in which case this is only valid if shadowing that
tag in an inner scope. */
ctsk_tagref,
/* A reference to a tag, not previously declared in a visible
scope. */
ctsk_tagfirstref,
/* A definition of a tag such as "struct foo { int a; }". */
ctsk_tagdef,
/* A typedef name. */
ctsk_typedef,
/* An ObjC-specific kind of type specifier. */
ctsk_objc,
/* A typeof specifier. */
ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
passed to declspecs_add_type only. */
struct c_typespec {
/* What kind of type specifier this is. */
enum c_typespec_kind kind;
/* Whether the expression has operands suitable for use in constant
expressions. */
bool expr_const_operands;
/* The specifier itself. */
tree spec;
/* An expression to be evaluated before the type specifier, in the
case of typeof specifiers, or NULL otherwise or if no such
expression is required for a particular typeof specifier. In
particular, when typeof is applied to an expression of variably
modified type, that expression must be evaluated in order to
determine array sizes that form part of the type, but the
expression itself (as opposed to the array sizes) forms no part
of the type and so needs to be recorded separately. */
tree expr;
};
/* A storage class specifier. */
enum c_storage_class {
csc_none,
csc_auto,
csc_extern,
csc_register,
csc_static,
csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
or none of these. */
enum c_typespec_keyword {
cts_none,
cts_void,
cts_bool,
cts_char,
cts_int,
cts_float,
cts_int128,
cts_double,
cts_dfloat32,
cts_dfloat64,
cts_dfloat128,
cts_fract,
cts_accum
};
/* This enum lists all the possible declarator specifiers, storage
class or attribute that a user can write. There is at least one
enumerator per possible declarator specifier in the struct
c_declspecs below.
It is used to index the array of declspec locations in struct
c_declspecs. */
enum c_declspec_word {
cdw_typespec /* A catch-all for a typespec. */,
cdw_storage_class /* A catch-all for a storage class */,
cdw_attributes,
cdw_typedef,
cdw_explicit_signed,
cdw_deprecated,
cdw_default_int,
cdw_long,
cdw_long_long,
cdw_short,
cdw_signed,
cdw_unsigned,
cdw_complex,
cdw_inline,
cdw_noreturn,
cdw_thread,
cdw_const,
cdw_volatile,
cdw_restrict,
cdw_saturating,
cdw_alignas,
cdw_address_space,
cdw_number_of_elements /* This one must always be the last
enumerator. */
};
/* A sequence of declaration specifiers in C. When a new declaration
specifier is added, please update the enum c_declspec_word above
accordingly. */
struct c_declspecs {
source_location locations[cdw_number_of_elements];
/* The type specified, if a single type specifier such as a struct,
union or enum specifier, typedef name or typeof specifies the
whole type, or NULL_TREE if none or a keyword such as "void" or
"char" is used. Does not include qualifiers. */
tree type;
/* Any expression to be evaluated before the type, from a typeof
specifier. */
tree expr;
/* The attributes from a typedef decl. */
tree decl_attr;
/* When parsing, the attributes. Outside the parser, this will be
NULL; attributes (possibly from multiple lists) will be passed
separately. */
tree attrs;
/* The base-2 log of the greatest alignment required by an _Alignas
specifier, in bytes, or -1 if no such specifiers with nonzero
alignment. */
int align_log;
/* The storage class specifier, or csc_none if none. */
enum c_storage_class storage_class;
/* Any type specifier keyword used such as "int", not reflecting
modifiers such as "short", or cts_none if none. */
ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
/* The kind of type specifier if one has been seen, ctsk_none
otherwise. */
ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3;
/* Whether any expressions in typeof specifiers may appear in
constant expressions. */
BOOL_BITFIELD expr_const_operands : 1;
/* Whether any declaration specifiers have been seen at all. */
BOOL_BITFIELD declspecs_seen_p : 1;
/* Whether something other than a storage class specifier or
attribute has been seen. This is used to warn for the
obsolescent usage of storage class specifiers other than at the
start of the list. (Doing this properly would require function
specifiers to be handled separately from storage class
specifiers.) */
BOOL_BITFIELD non_sc_seen_p : 1;
/* Whether the type is specified by a typedef or typeof name. */
BOOL_BITFIELD typedef_p : 1;
/* Whether the type is explicitly "signed" or specified by a typedef
whose type is explicitly "signed". */
BOOL_BITFIELD explicit_signed_p : 1;
/* Whether the specifiers include a deprecated typedef. */
BOOL_BITFIELD deprecated_p : 1;
/* Whether the type defaulted to "int" because there were no type
specifiers. */
BOOL_BITFIELD default_int_p : 1;
/* Whether "long" was specified. */
BOOL_BITFIELD long_p : 1;
/* Whether "long" was specified more than once. */
BOOL_BITFIELD long_long_p : 1;
/* Whether "short" was specified. */
BOOL_BITFIELD short_p : 1;
/* Whether "signed" was specified. */
BOOL_BITFIELD signed_p : 1;
/* Whether "unsigned" was specified. */
BOOL_BITFIELD unsigned_p : 1;
/* Whether "complex" was specified. */
BOOL_BITFIELD complex_p : 1;
/* Whether "inline" was specified. */
BOOL_BITFIELD inline_p : 1;
/* Whether "_Noreturn" was specified. */
BOOL_BITFIELD noreturn_p : 1;
/* Whether "__thread" was specified. */
BOOL_BITFIELD thread_p : 1;
/* Whether "const" was specified. */
BOOL_BITFIELD const_p : 1;
/* Whether "volatile" was specified. */
BOOL_BITFIELD volatile_p : 1;
/* Whether "restrict" was specified. */
BOOL_BITFIELD restrict_p : 1;
/* Whether "_Sat" was specified. */
BOOL_BITFIELD saturating_p : 1;
/* Whether any alignment specifier (even with zero alignment) was
specified. */
BOOL_BITFIELD alignas_p : 1;
/* The address space that the declaration belongs to. */
addr_space_t address_space;
};
/* The various kinds of declarators in C. */
enum c_declarator_kind {
/* An identifier. */
cdk_id,
/* A function. */
cdk_function,
/* An array. */
cdk_array,
/* A pointer. */
cdk_pointer,
/* Parenthesized declarator with nested attributes. */
cdk_attrs
};
typedef struct c_arg_tag_d {
/* The argument name. */
tree id;
/* The type of the argument. */
tree type;
} c_arg_tag;
/* Information about the parameters in a function declarator. */
struct c_arg_info {
/* A list of parameter decls. */
tree parms;
/* A list of structure, union and enum tags defined. */
vec<c_arg_tag, va_gc> *tags;
/* A list of argument types to go in the FUNCTION_TYPE. */
tree types;
/* A list of non-parameter decls (notably enumeration constants)
defined with the parameters. */
tree others;
/* A compound expression of VLA sizes from the parameters, or NULL.
In a function definition, these are used to ensure that
side-effects in sizes of arrays converted to pointers (such as a
parameter int i[n++]) take place; otherwise, they are
ignored. */
tree pending_sizes;
/* True when these arguments had [*]. */
BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator. */
struct c_declarator {
/* The kind of declarator. */
enum c_declarator_kind kind;
location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
/* Except for cdk_id, the contained declarator. For cdk_id, NULL. */
struct c_declarator *declarator;
union {
/* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
declarator. */
tree id;
/* For functions. */
struct c_arg_info *arg_info;
/* For arrays. */
struct {
/* The array dimension, or NULL for [] and [*]. */
tree dimen;
/* The qualifiers inside []. */
int quals;
/* The attributes (currently ignored) inside []. */
tree attrs;
/* Whether [static] was used. */
BOOL_BITFIELD static_p : 1;
/* Whether [*] was used. */
BOOL_BITFIELD vla_unspec_p : 1;
} array;
/* For pointers, the qualifiers on the pointer type. */
int pointer_quals;
/* For attributes. */
tree attrs;
} u;
};
/* A type name. */
struct c_type_name {
/* The declaration specifiers. */
struct c_declspecs *specs;
/* The declarator. */
struct c_declarator *declarator;
};
/* A parameter. */
struct c_parm {
/* The declaration specifiers, minus any prefix attributes. */
struct c_declspecs *specs;
/* The attributes. */
tree attrs;
/* The declarator. */
struct c_declarator *declarator;
};
/* Used when parsing an enum. Initialized by start_enum. */
struct c_enum_contents
{
/* While defining an enum type, this is 1 plus the last enumerator
constant value. */
tree enum_next_value;
/* Nonzero means that there was overflow computing enum_next_value. */
int enum_overflow;
};
/* A type of reference to a static identifier in an inline
function. */
enum c_inline_static_type {
/* Identifier with internal linkage used in function that may be an
inline definition (i.e., file-scope static). */
csi_internal,
/* Modifiable object with static storage duration defined in
function that may be an inline definition (i.e., local
static). */
csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
struct c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern bool global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
struct c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
struct c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (source_location,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern tree c_last_sizeof_arg;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (tree);
extern int same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree);
extern void c_incomplete_type_error (const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, int, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
tree, tree);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void error_init (const char *);
extern void pedwarn_init (location_t, int opt, const char *);
extern void maybe_warn_string_init (tree, struct c_expr);
extern void start_init (tree, tree, int);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void push_init_level (int, struct obstack *);
extern struct c_expr pop_init_level (int, struct obstack *);
extern void set_init_index (tree, tree, struct obstack *);
extern void set_init_label (tree, struct obstack *);
extern void process_init_element (struct c_expr, bool, struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree);
extern void c_finish_case (tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree, bool);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern tree c_finish_omp_clauses (tree);
extern tree c_build_va_arg (location_t, tree, tree);
extern tree c_finish_transaction (location_t, tree, int);
extern tree c_build_function_call_vec (location_t, tree, vec<tree, va_gc> *,
vec<tree, va_gc> *);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* Mode used to build pointers (VOIDmode means ptr_mode). */
extern enum machine_mode c_default_pointer_mode;
/* In c-decl.c */
extern void c_finish_incomplete_decl (tree);
extern void c_write_global_declarations (void);
/* In c-errors.c */
extern void pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern void pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
#endif /* ! GCC_C_TREE_H */
|
prepare_up.c | /************************************************************************************
File: prepare_up.c
Contains the functions used to prepare the upload of the page
***********************************************************************************/
#include "pixmap.h"
#include "upload.h"
/* Prepares the upload of the page */
/***********************************************************************************/
void prepare_up(page in_page) {
int i, j;
//////////////////////////////////////////////////////////////////////////////////////
/* TO COMPLETE: code to prepare the upload of the page */
//////////////////////////////////////////////////////////////////////////////////////
#pragma omp parallel for schedule(dynamic, 4) private(i, j)
for (i = 0; i < in_page.h; i++)
for (j = 0; j < in_page.w; j++)
upload(in_page.im[i][j], i, j, in_page.h, in_page.w);
}
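/* Editorial note: the two loops above are perfectly nested, so an equivalent
   strategy collapses them into a single iteration space (OpenMP 3.0+):

   #pragma omp parallel for collapse(2) schedule(dynamic, 4)
   for (i = 0; i < in_page.h; i++)
     for (j = 0; j < in_page.w; j++)
       upload(in_page.im[i][j], i, j, in_page.h, in_page.w);
*/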
|
spmv_N_thread_newalg.c | /*
*********************************************
* 314 Principles of Programming Languages *
* Fall 2016 *
*********************************************
*
* Read a real (non-complex) sparse matrix from a Matrix Market (v. 2.0) file
* and a vector from a txt file, perform matrix multiplication and store the
* result to output.txt. This is the parallel, dynamically scheduled version of
* sparse matrix vector multiplication.
*
*
*
* NOTES:
*
* 1) Matrix Market files are always 1-based, i.e. the index of the first
* element of a matrix is (1,1), not (0,0) as in C. Adjust these
* offsets accordingly when reading and writing to files.
*
* 2) ANSI C requires one to use the "l" format modifier when reading
* double precision floating point numbers in scanf() and
* its variants. For example, use "%lf", "%lg", or "%le"
* when reading doubles, otherwise errors will occur.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mmio.h"
#include <omp.h>
#include <sys/time.h>
#include "utils.h"
//sort the entries by the row-major key vindex; a, rindex and cindex are permuted alongside
void quicksort(double* a, double* vindex, int* rindex, int* cindex, int n)
{
int i, j, m;
double p, t, s;
if (n < 2)
return;
p = vindex[n / 2];
for (i = 0, j = n - 1;; i++, j--) {
while (vindex[i]<p)
i++;
while (p<vindex[j])
j--;
if (i >= j)
break;
t = a[i];
a[i] = a[j];
a[j] = t;
s = vindex[i];
vindex[i] = vindex[j];
vindex[j] = s;
m = rindex[i];
rindex[i] = rindex[j];
rindex[j] = m;
m = cindex[i];
cindex[i] = cindex[j];
cindex[j] = m;
}
quicksort(a, vindex, rindex, cindex, i);
quicksort(a + i, vindex + i, rindex + i, cindex + i, n - i);
}
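/* Editorial note: main() below applies quicksort() to the COO triplets keyed
   by vIndex = row * N + col, i.e. row-major order, so after sorting all
   entries of a row occupy one contiguous span of val/rIndex/cIndex and each
   row can be delimited by a single start/end index pair. */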
int main(int argc, char *argv[])
{
int ret_code;
MM_typecode matcode;
FILE *f;
int M, N, nz; //M is the row count, N the column count and nz the number of nonzero entries
int tmp, i, j, vecdim, *rIndex, *cIndex, *rsIndex, *reIndex;
double *val, *res, *vec, *vIndex;
if (argc < 4)
{
fprintf(stderr, "Usage: %s [matrix-market-filename] [input-vector-filename] [thread-num]\n", argv[0]);
exit(1);
}
printf("\nOpening input matrix file: %s\n", argv[1]);
if ((f = fopen(argv[1], "r")) == NULL)
{
printf("Failed to open the input matrix file!\n");
exit(1);
}
if (mm_read_banner(f, &matcode) != 0)
{
printf("Could not process Matrix Market banner.\n");
exit(1);
}
/* This is how one can screen matrix types if their application */
/* only supports a subset of the Matrix Market data types. */
if (mm_is_complex(matcode) && mm_is_matrix(matcode) &&
mm_is_sparse(matcode))
{
printf("Sorry, this application does not support ");
printf("Matrix Market type: [%s]\n", mm_typecode_to_str(matcode));
exit(1);
}
/* find out size of sparse matrix .... */
if ((ret_code = mm_read_mtx_crd_size(f, &M, &N, &nz)) != 0)
exit(1);
/* reserve memory for matrices */
rIndex = (int *)malloc(nz * sizeof(int));
cIndex = (int *)malloc(nz * sizeof(int));
val = (double *)malloc(nz * sizeof(double));
/* NOTE: when reading in doubles, ANSI C requires the use of the "l" */
/* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */
/* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 136 lines 13-15) */
for (i = 0; i<nz; i++)
{
fscanf(f, "%d %d %lg\n", &rIndex[i], &cIndex[i], &val[i]);
rIndex[i]--; /* adjust from 1-based to 0-based */
cIndex[i]--;
}
if (f != stdin) fclose(f);
printf("Opening input vector file: %s\n", argv[2]);
//open and load the vector input
if ((f = fopen(argv[2], "r")) == NULL)
{
printf("Failed to open the input vector file!\n");
exit(1);
}
fscanf(f, "%d\n", &vecdim);
if (vecdim != N) /* the vector multiplies the matrix columns, so its length must be N */
{
printf("dimension mismatch!\n");
exit(1);
}
vec = (double*)malloc(vecdim * sizeof(double));
for (i = 0; i<vecdim; i++)
{
fscanf(f, "%lg\n", &vec[i]);
}
if (f != stdin) fclose(f);
//the reference result computed sequentially, used later to verify the parallel result
double* res_seq = (double*)malloc(M*sizeof(double));
memset(res_seq, 0, M*sizeof(double));
getmul(val, vec, rIndex, cIndex, nz, res_seq);
vIndex = (double*)malloc(nz*sizeof(double));
memset(vIndex, 0, nz*sizeof(double));
for (i = 0; i < nz; i++)
{
vIndex[i] = (double)rIndex[i] * N + cIndex[i];
if (vIndex[i] < 0)
{
printf("Error!\n");
exit(1);
}
}
quicksort(val, vIndex, rIndex, cIndex, nz);
//We use rsIndex/reIndex to keep the start/end position of each row. The initial values are
//-1 or -2 for all entries. rsIndex[i] indicates the start position of the i-th row. Hence
//the position index of the i-th row runs from rsIndex[i] to reIndex[i]
rsIndex = (int*)malloc(M*sizeof(int)); //start/end position of each row
memset(rsIndex, -1, M*sizeof(int));
reIndex = (int*)malloc(M*sizeof(int));
memset(reIndex, -2, M*sizeof(int));
for (i = 0; i<nz; i++)
{
int tmp = (int)(vIndex[i] / N);
if (rsIndex[tmp] == -1)
{
rsIndex[tmp] = i;
reIndex[tmp] = i;
}
else
reIndex[tmp] = i;
}
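/* Editorial example: a row i with no nonzeros keeps its sentinels
   rsIndex[i] == -1 and reIndex[i] == -2, so the per-row loop below
   (j = rsIndex[i]; j <= reIndex[i]) runs zero times and res[i] stays 0. */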
int thread_num = atoi(argv[3]);
omp_set_num_threads(thread_num);
printf("\n Start computation ... \n");
struct timeval start, end;
gettimeofday(&start, NULL);
/************************/
/* now calculate the multiplication */
/************************/
res = (double*)malloc(M*sizeof(double));
memset(res, 0, M*sizeof(double));
// Your OpenMP pragma should be inserted for one or both loops below.
// You need to determine which loop is safe to be parallelized.
// You will also need to use correct parallelization parameters.
// Here is the file where you design your own parallelization strategy. You
// are allowed to perform loop transformations, e.g., loop tiling, and
// additional preprocessing steps, e.g., load balancing, before this
// two-level nested loop.
int chunk = 1000;
#pragma omp parallel num_threads(thread_num)
{
#pragma omp for private(j, tmp) schedule(dynamic, chunk)
for (i=0; i<M; i++)
{
double result = 0.0;
for (j = rsIndex[i]; j <= reIndex[i]; j++)
{
tmp = cIndex[j];
result += val[j] * vec[tmp];
}
res[i] = result;
}
}
gettimeofday(&end, NULL);
printf(" End of computation ... \n\n");
long elapsed_time = ((end.tv_sec * 1000000 + end.tv_usec)
- (start.tv_sec * 1000000 + start.tv_usec));
if (!checkerror(res, res_seq, M))
{
printf("Calculation Error!\n");
exit(1);
}
else {
printf(" Test Result Passed ... \n");
}
printf("Dynamic Parallelization Total time: %ld micro-seconds\n\n", elapsed_time);
// save the result
if ((f = fopen("output.txt", "w")) == NULL)
{
printf("Fail to open the output file!\n");
exit(1);
}
for (i = 0; i < M; i++) /* res holds one entry per matrix row */
{
fprintf(f, "%lg\n", res[i]);
}
fclose(f);
free(res_seq);
free(vIndex);
free(res);
free(vec);
free(rIndex);
free(cIndex);
free(val);
free(rsIndex);
free(reIndex);
return 0;
}
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2021 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE, UNION_TYPE or ENUMERAL_TYPE, a list of
variable declarations whose type would be completed by completing
that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) \
TYPE_LANG_SLOT_1 (TREE_CHECK4 (TYPE, RECORD_TYPE, UNION_TYPE, \
QUAL_UNION_TYPE, ENUMERAL_TYPE))
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
This is used for -Wc++-compat. */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For a PARM_DECL, nonzero if it was declared as an array. */
#define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0 (NODE)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Set on VAR_DECLs for compound literals. */
#define C_DECL_COMPOUND_LITERAL_P(DECL) \
DECL_LANG_FLAG_5 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !fndecl_built_in_p (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) \
TYPE_LANG_SLOT_1 (FUNCTION_TYPE_CHECK (NODE))
/* For a CONSTRUCTOR, whether some initializer contains a
subexpression meaning it is not a constant expression. */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already
been folded. */
#define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1 (SAVE_EXPR_CHECK (EXP))
/* Record parser information about an expression that is irrelevant
for code generation alongside a tree representing its value. */
struct c_expr
{
/* The value of the expression. */
tree value;
/* Record the original unary/binary operator of an expression, which may
have been changed by fold, STRING_CST for unparenthesized string
constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
(even if parenthesized), for subexpressions, and for non-constant
initializers, or ERROR_MARK for other expressions (including
parenthesized expressions). */
enum tree_code original_code;
/* If not NULL, the original type of an expression. This will
differ from the type of the value field for an enum constant.
The type of an enum constant is a plain integer type, but this
field will be the enum type. */
tree original_type;
/* The source range of this expression. This is redundant
for node values that have locations, but not all node kinds
have locations (e.g. constants, and references to params, locals,
etc), so we stash a copy here. */
source_range src_range;
/* Access to the first and last locations within the source spelling
of this expression. */
location_t get_start () const { return src_range.m_start; }
location_t get_finish () const { return src_range.m_finish; }
location_t get_location () const
{
if (EXPR_HAS_LOCATION (value))
return EXPR_LOCATION (value);
else
return make_location (get_start (), get_start (), get_finish ());
}
/* Set the value to error_mark_node whilst ensuring that src_range
is initialized. */
void set_error ()
{
value = error_mark_node;
src_range.m_start = UNKNOWN_LOCATION;
src_range.m_finish = UNKNOWN_LOCATION;
}
};
/* Type alias for struct c_expr. This allows the structure to be used
inside the VEC types. */
typedef struct c_expr c_expr_t;
/* A kind of type specifier. Note that this information is currently
only used to distinguish tag definitions, tag references and typeof
uses. */
enum c_typespec_kind {
/* No typespec. This appears only in struct c_declspec. */
ctsk_none,
/* A reserved keyword type specifier. */
ctsk_resword,
/* A reference to a tag, previously declared, such as "struct foo".
This includes where the previous declaration was as a different
kind of tag, in which case this is only valid if shadowing that
tag in an inner scope. */
ctsk_tagref,
/* Likewise, with standard attributes present in the reference. */
ctsk_tagref_attrs,
/* A reference to a tag, not previously declared in a visible
scope. */
ctsk_tagfirstref,
/* Likewise, with standard attributes present in the reference. */
ctsk_tagfirstref_attrs,
/* A definition of a tag such as "struct foo { int a; }". */
ctsk_tagdef,
/* A typedef name. */
ctsk_typedef,
/* An ObjC-specific kind of type specifier. */
ctsk_objc,
/* A typeof specifier, or _Atomic ( type-name ). */
ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
passed to declspecs_add_type only. */
struct c_typespec {
/* What kind of type specifier this is. */
enum c_typespec_kind kind;
/* Whether the expression has operands suitable for use in constant
expressions. */
bool expr_const_operands;
/* The specifier itself. */
tree spec;
/* An expression to be evaluated before the type specifier, in the
case of typeof specifiers, or NULL otherwise or if no such
expression is required for a particular typeof specifier. In
particular, when typeof is applied to an expression of variably
modified type, that expression must be evaluated in order to
determine array sizes that form part of the type, but the
expression itself (as opposed to the array sizes) forms no part
of the type and so needs to be recorded separately. */
tree expr;
};
/* A storage class specifier. */
enum c_storage_class {
csc_none,
csc_auto,
csc_extern,
csc_register,
csc_static,
csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
or none of these. */
enum c_typespec_keyword {
cts_none,
cts_void,
cts_bool,
cts_char,
cts_int,
cts_float,
cts_int_n,
cts_double,
cts_dfloat32,
cts_dfloat64,
cts_dfloat128,
cts_floatn_nx,
cts_fract,
cts_accum,
cts_auto_type
};
/* This enum lists all the possible declarator specifiers, storage
class or attribute that a user can write. There is at least one
enumerator per possible declarator specifier in the struct
c_declspecs below.
It is used to index the array of declspec locations in struct
c_declspecs. */
enum c_declspec_word {
cdw_typespec /* A catch-all for a typespec. */,
cdw_storage_class /* A catch-all for a storage class */,
cdw_attributes,
cdw_typedef,
cdw_explicit_signed,
cdw_deprecated,
cdw_default_int,
cdw_long,
cdw_long_long,
cdw_short,
cdw_signed,
cdw_unsigned,
cdw_complex,
cdw_inline,
cdw_noreturn,
cdw_thread,
cdw_const,
cdw_volatile,
cdw_restrict,
cdw_atomic,
cdw_saturating,
cdw_alignas,
cdw_address_space,
cdw_gimple,
cdw_rtl,
cdw_number_of_elements /* This one must always be the last
enumerator. */
};
enum c_declspec_il {
cdil_none,
cdil_gimple, /* __GIMPLE */
cdil_gimple_cfg, /* __GIMPLE(cfg) */
cdil_gimple_ssa, /* __GIMPLE(ssa) */
cdil_rtl /* __RTL */
};
/* A sequence of declaration specifiers in C. When a new declaration
specifier is added, please update the enum c_declspec_word above
accordingly. */
struct c_declspecs {
location_t locations[cdw_number_of_elements];
/* The type specified, if a single type specifier such as a struct,
union or enum specifier, typedef name or typeof specifies the
whole type, or NULL_TREE if none or a keyword such as "void" or
"char" is used. Does not include qualifiers. */
tree type;
/* Any expression to be evaluated before the type, from a typeof
specifier. */
tree expr;
/* The attributes from a typedef decl. */
tree decl_attr;
/* When parsing, the GNU attributes and prefix standard attributes.
Outside the parser, this will be NULL; attributes (possibly from
multiple lists) will be passed separately. */
tree attrs;
/* When parsing, postfix standard attributes (which appertain to the
type specified by the preceding declaration specifiers, unlike
prefix standard attributes which appertain to the declaration or
declarations as a whole). */
tree postfix_attrs;
/* The pass to start compiling a __GIMPLE or __RTL function with. */
char *gimple_or_rtl_pass;
/* ENTRY BB count. */
profile_count entry_bb_count;
/* The base-2 log of the greatest alignment required by an _Alignas
specifier, in bytes, or -1 if no such specifiers with nonzero
alignment. */
int align_log;
/* For the __intN declspec, this stores the index into the int_n_* arrays. */
int int_n_idx;
/* For the _FloatN and _FloatNx declspec, this stores the index into
the floatn_nx_types array. */
int floatn_nx_idx;
/* The storage class specifier, or csc_none if none. */
enum c_storage_class storage_class;
/* Any type specifier keyword used such as "int", not reflecting
modifiers such as "short", or cts_none if none. */
ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
/* The kind of type specifier if one has been seen, ctsk_none
otherwise. */
ENUM_BITFIELD (c_typespec_kind) typespec_kind : 4;
ENUM_BITFIELD (c_declspec_il) declspec_il : 3;
/* Whether any expressions in typeof specifiers may appear in
constant expressions. */
BOOL_BITFIELD expr_const_operands : 1;
/* Whether any declaration specifiers have been seen at all. */
BOOL_BITFIELD declspecs_seen_p : 1;
/* Whether any declaration specifiers other than standard attributes
have been seen at all. If only standard attributes have been
seen, this is an attribute-declaration. */
BOOL_BITFIELD non_std_attrs_seen_p : 1;
/* Whether something other than a storage class specifier or
attribute has been seen. This is used to warn for the
obsolescent usage of storage class specifiers other than at the
start of the list. (Doing this properly would require function
specifiers to be handled separately from storage class
specifiers.) */
BOOL_BITFIELD non_sc_seen_p : 1;
/* Whether the type is specified by a typedef or typeof name. */
BOOL_BITFIELD typedef_p : 1;
/* Whether the type is explicitly "signed" or specified by a typedef
whose type is explicitly "signed". */
BOOL_BITFIELD explicit_signed_p : 1;
/* Whether the specifiers include a deprecated typedef. */
BOOL_BITFIELD deprecated_p : 1;
/* Whether the type defaulted to "int" because there were no type
specifiers. */
BOOL_BITFIELD default_int_p : 1;
/* Whether "long" was specified. */
BOOL_BITFIELD long_p : 1;
/* Whether "long" was specified more than once. */
BOOL_BITFIELD long_long_p : 1;
/* Whether "short" was specified. */
BOOL_BITFIELD short_p : 1;
/* Whether "signed" was specified. */
BOOL_BITFIELD signed_p : 1;
/* Whether "unsigned" was specified. */
BOOL_BITFIELD unsigned_p : 1;
/* Whether "complex" was specified. */
BOOL_BITFIELD complex_p : 1;
/* Whether "inline" was specified. */
BOOL_BITFIELD inline_p : 1;
/* Whether "_Noreturn" was specified. */
BOOL_BITFIELD noreturn_p : 1;
/* Whether "__thread" or "_Thread_local" was specified. */
BOOL_BITFIELD thread_p : 1;
/* Whether "__thread" rather than "_Thread_local" was specified. */
BOOL_BITFIELD thread_gnu_p : 1;
/* Whether "const" was specified. */
BOOL_BITFIELD const_p : 1;
/* Whether "volatile" was specified. */
BOOL_BITFIELD volatile_p : 1;
/* Whether "restrict" was specified. */
BOOL_BITFIELD restrict_p : 1;
/* Whether "_Atomic" was specified. */
BOOL_BITFIELD atomic_p : 1;
/* Whether "_Sat" was specified. */
BOOL_BITFIELD saturating_p : 1;
/* Whether any alignment specifier (even with zero alignment) was
specified. */
BOOL_BITFIELD alignas_p : 1;
/* The address space that the declaration belongs to. */
addr_space_t address_space;
};
/* The various kinds of declarators in C. */
enum c_declarator_kind {
/* An identifier. */
cdk_id,
/* A function. */
cdk_function,
/* An array. */
cdk_array,
/* A pointer. */
cdk_pointer,
/* Parenthesized declarator with nested attributes. */
cdk_attrs
};
struct c_arg_tag {
/* The argument name. */
tree id;
/* The type of the argument. */
tree type;
};
/* Information about the parameters in a function declarator. */
struct c_arg_info {
/* A list of parameter decls. */
tree parms;
/* A list of structure, union and enum tags defined. */
vec<c_arg_tag, va_gc> *tags;
/* A list of argument types to go in the FUNCTION_TYPE. */
tree types;
/* A list of non-parameter decls (notably enumeration constants)
defined with the parameters. */
tree others;
/* A compound expression of VLA sizes from the parameters, or NULL.
In a function definition, these are used to ensure that
side-effects in sizes of arrays converted to pointers (such as a
parameter int i[n++]) take place; otherwise, they are
ignored. */
tree pending_sizes;
/* True when these arguments had [*]. */
BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator. */
struct c_declarator {
/* The kind of declarator. */
enum c_declarator_kind kind;
location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
/* Except for cdk_id, the contained declarator. For cdk_id, NULL. */
struct c_declarator *declarator;
union {
/* For identifiers. */
struct {
/* An IDENTIFIER_NODE, or NULL_TREE if an abstract
declarator. */
tree id;
/* Any attributes (which apply to the declaration rather than to
the type described by the outer declarators). */
tree attrs;
} id;
/* For functions. */
struct c_arg_info *arg_info;
/* For arrays. */
struct {
/* The array dimension, or NULL for [] and [*]. */
tree dimen;
/* The qualifiers inside []. */
int quals;
/* The attributes (currently ignored) inside []. */
tree attrs;
/* Whether [static] was used. */
BOOL_BITFIELD static_p : 1;
/* Whether [*] was used. */
BOOL_BITFIELD vla_unspec_p : 1;
} array;
/* For pointers, the qualifiers on the pointer type. */
int pointer_quals;
/* For attributes. */
tree attrs;
} u;
};
/* A type name. */
struct c_type_name {
/* The declaration specifiers. */
struct c_declspecs *specs;
/* The declarator. */
struct c_declarator *declarator;
};
/* A parameter. */
struct c_parm {
/* The declaration specifiers, minus any prefix attributes. */
struct c_declspecs *specs;
/* The attributes. */
tree attrs;
/* The declarator. */
struct c_declarator *declarator;
/* The location of the parameter. */
location_t loc;
};
/* Used when parsing an enum. Initialized by start_enum. */
struct c_enum_contents
{
/* While defining an enum type, this is 1 plus the last enumerator
constant value. */
tree enum_next_value;
/* Nonzero means that there was overflow computing enum_next_value. */
int enum_overflow;
};
/* A type of reference to a static identifier in an inline
function. */
enum c_inline_static_type {
/* Identifier with internal linkage used in function that may be an
inline definition (i.e., file-scope static). */
csi_internal,
/* Modifiable object with static storage duration defined in
function that may be an inline definition (i.e., local
static). */
csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
extern bool c_keyword_starts_typename (enum rid keyword);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
class c_struct_parse_info;
extern struct obstack parser_obstack;
/* Set to IN_ITERATION_STMT if parsing an iteration-statement,
to IN_OMP_BLOCK if parsing OpenMP structured block and
IN_OMP_FOR if parsing OpenMP loop. If parsing a switch statement,
this is bitwise ORed with IN_SWITCH_STMT, unless parsing an
iteration-statement, OpenMP block or loop within that switch. */
#define IN_SWITCH_STMT 1
#define IN_ITERATION_STMT 2
#define IN_OMP_BLOCK 4
#define IN_OMP_FOR 8
#define IN_OBJC_FOREACH 16
extern unsigned char in_statement;
extern bool switch_statement_break_seen_p;
extern bool global_bindings_p (void);
extern tree pushdecl (tree);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (location_t = input_location);
extern tree finish_struct (location_t, tree, tree, tree,
class c_struct_parse_info *);
extern tree c_simulate_enum_decl (location_t, const char *,
vec<string_int_pair>);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern tree c_simulate_builtin_function_decl (tree);
extern void c_warn_unused_attributes (tree);
extern tree c_warn_type_attributes (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern bool start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree, location_t * = NULL);
extern tree start_struct (location_t, enum tree_code, tree,
class c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern void temp_store_parm_decls (tree, tree);
extern void temp_pop_parm_decls (void);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree,
bool, tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *, location_t);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (location_t,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (location_t,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (location_t,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (location_t,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (location_t,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
extern alias_set_type c_get_alias_set (tree);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern bool c_in_omp_for;
extern tree c_last_sizeof_arg;
extern location_t c_last_sizeof_loc;
extern struct c_switch *c_switch_stack;
extern bool char_type_p (tree);
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (location_t, tree);
extern bool same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree, bool = false);
extern void c_incomplete_type_error (location_t, const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr,
bool, bool);
extern tree decl_constant_value_1 (tree, bool);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree, location_t);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, bool, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
location_t, tree, tree, location_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void maybe_warn_string_init (location_t, tree, struct c_expr);
extern void start_init (tree, tree, int, rich_location *);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void finish_implicit_inits (location_t, struct obstack *);
extern void push_init_level (location_t, int, struct obstack *);
extern struct c_expr pop_init_level (location_t, int, struct obstack *,
location_t);
extern void set_init_index (location_t, tree, tree, struct obstack *);
extern void set_init_label (location_t, tree, location_t, struct obstack *);
extern void process_init_element (location_t, struct c_expr, bool,
struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool,
unsigned int);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_switch (location_t, location_t, tree, bool);
extern void c_finish_switch (tree, tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool,
bool);
extern tree build_asm_stmt (bool, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree);
extern void c_finish_loop (location_t, location_t, tree, location_t, tree,
tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_finish_omp_construct (location_t, enum tree_code, tree, tree);
extern tree c_finish_oacc_data (location_t, tree, tree);
extern tree c_finish_oacc_host_data (location_t, tree, tree);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern void c_finish_omp_cancel (location_t, tree);
extern void c_finish_omp_cancellation_point (location_t, tree);
extern tree c_finish_omp_clauses (tree, enum c_omp_region_type);
extern tree c_build_va_arg (location_t, tree, location_t, tree);
extern tree c_finish_transaction (location_t, tree, int);
extern bool c_tree_equal (tree, tree);
extern tree c_build_function_call_vec (location_t, vec<location_t>, tree,
vec<tree, va_gc> *, vec<tree, va_gc> *);
extern tree c_omp_clause_copy_ctor (tree, tree, tree);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* In c-decl.c */
/* Tell the binding oracle what kind of binding we are looking for. */
enum c_oracle_request
{
C_ORACLE_SYMBOL,
C_ORACLE_TAG,
C_ORACLE_LABEL
};
/* If this is non-NULL, then it is a "binding oracle" which can lazily
create bindings when needed by the C compiler. The oracle is told
the name and type of the binding to create. It can call pushdecl
or the like to ensure the binding is visible; or do nothing,
leaving the binding untouched. c-decl.c takes note of when the
oracle has been called and will not call it again if it fails to
create a given binding. */
typedef void c_binding_oracle_function (enum c_oracle_request, tree identifier);
extern c_binding_oracle_function *c_binding_oracle;
extern void c_finish_incomplete_decl (tree);
extern tree c_omp_reduction_id (enum tree_code, tree);
extern tree c_omp_reduction_decl (tree);
extern tree c_omp_reduction_lookup (tree, tree);
extern tree c_check_omp_declare_reduction_r (tree *, int *, void *);
extern bool c_check_in_current_scope (tree);
extern void c_pushtag (location_t, tree, tree);
extern void c_bind (location_t, tree, bool);
extern bool tag_exists_p (enum tree_code, tree);
/* In c-errors.c */
extern bool pedwarn_c90 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern bool pedwarn_c99 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern bool pedwarn_c11 (location_t, int opt, const char *, ...)
ATTRIBUTE_GCC_DIAG(3,4);
extern void
set_c_expr_source_range (c_expr *expr,
location_t start, location_t finish);
extern void
set_c_expr_source_range (c_expr *expr,
source_range src_range);
/* In c-fold.c */
extern vec<tree> incomplete_record_decls;
#if CHECKING_P
namespace selftest {
extern void run_c_tests (void);
} // namespace selftest
#endif /* #if CHECKING_P */
#endif /* ! GCC_C_TREE_H */
|
scheduled-clauseModificado2.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#define omp_get_num_procs() 1
#define omp_in_parallel() 0
#endif
int main(int argc, char **argv) {
int i, n = 16, chunk, a[n], suma = 0;
if(argc < 2) {
fprintf(stderr,"\nFalta chunk \n");
exit(-1);
}
chunk = atoi(argv[1]);
for (i=0; i<n; i++) a[i] = i;
printf("\nFuera de parallel:\n");
printf("num_threads: %d\n", omp_get_num_threads());
printf("num_procs: %d\n", omp_get_num_procs());
printf("in_parallel: %d\n", omp_in_parallel());
#pragma omp parallel for firstprivate(suma) lastprivate(suma) schedule(dynamic,chunk)
for (i=0; i<n; i++)
{
if(i == 0){
printf("\nDentro de parallel:\n");
printf("num_threads: %d\n", omp_get_num_threads());
printf("num_procs: %d\n", omp_get_num_procs());
printf("in_parallel: %d\n", omp_in_parallel());
}
suma = suma + a[i];
//printf(" thread %d suma a[%d] suma=%d \n",omp_get_thread_num(),i,suma);
}
//printf("Fuera de 'parallel for' suma=%d\n",suma);
}
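/* Build/run sketch (assumed binary name):
 *   gcc -fopenmp scheduled-clauseModificado2.c -o sched
 *   ./sched 2        (chunk size 2, iterations handed out dynamically)
 * Note that with lastprivate(suma), suma after the loop is the private
 * value of the thread that ran the last iteration (i == 15), i.e. that
 * thread's partial sum, not the total over all iterations.
 */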
|
GB_unaryop__lnot_int8_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int8_uint64
// op(A') function: GB_tran__lnot_int8_uint64
// C type: int8_t
// A type: uint64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
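// For reference, GB_CAST_OP (p, p) expands to:
//      uint64_t aij = Ax [p] ;
//      int8_t x = (int8_t) aij ;
//      Cx [p] = !(x != 0) ;
// i.e. each entry of A is typecast and then logically negated.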
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_int8_uint64
(
int8_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_int8_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
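/* Usage sketch, matching the timing loop below:
 *     struct timeval start, end, result;
 *     gettimeofday(&start, 0);
 *     ... work ...
 *     gettimeofday(&end, 0);
 *     timeval_subtract(&result, &end, &start);
 *     double seconds = result.tv_sec + result.tv_usec * 1.0e-6;
 */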
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx = 104, Ny = 104, Nz = 104, Nt = 10; /* fallback sizes (an assumption) when not given on the command line */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 24;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0;
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
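// The five coefficients define the 25-point star stencil: one center
// point weighted by coef0 plus four shells of six axis neighbors each
// (offsets +/-1..+/-4 along x, y and z), i.e. 1 + 4*6 = 25 points.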
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(A);
free(roc2);
free(tile_size);
return 0;
}
|
GB_binop__bget_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__bget_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__bget_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_uint64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_uint64)
// C=scalar+B GB (_bind1st__bget_uint64)
// C=scalar+B' GB (_bind1st_tran__bget_uint64)
// C=A+scalar GB (_bind2nd__bget_uint64)
// C=A'+scalar GB (_bind2nd_tran__bget_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_BITGET (aij, bij, uint64_t, 64)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, uint64_t, 64) ;
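// GB_BITGET is the bitget helper behind the BGET operator: z gets the
// single bit of x selected by y, with uint64_t/64 fixing the word width.
// (Assuming MATLAB-style bitget semantics, bit positions counted from 1
// and out-of-range positions yielding 0.)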
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_UINT64 || GxB_NO_BGET_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bget_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bget_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bget_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bget_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bget_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bget_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bget_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bget_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bget_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITGET (x, bij, uint64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bget_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITGET (aij, y, uint64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, uint64_t, 64) ; \
}
GrB_Info GB (_bind1st_tran__bget_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, uint64_t, 64) ; \
}
GrB_Info GB (_bind2nd_tran__bget_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__ainv_int64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_int64_int64)
// op(A') function: GB (_unop_tran__ainv_int64_int64)
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = aij ; \
Cx [pC] = -z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ainv_int64_int64)
(
int64_t *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = -z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
int64_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ainv_int64_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
OMPHeader.h | #include <iostream>
#include <map>
#include <string>
#include <pthread.h>
#include <ftw.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <cmath>
#include <queue>
#include <hashlibpp.h>
#include <deque>
#include <set>
#include <algorithm>
using namespace std;
typedef struct {
string str;
off_t size;
string HashValue;
} fileKey;
inline bool operator <(fileKey const& left, fileKey const& right) {
if (left.str < right.str) { return true; }
if (left.str == right.str && left.size < right.size) { return true; }
if (left.str == right.str && left.size == right.size && left.HashValue < right.HashValue) { return true; }
return false;
}
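// This is a lexicographic "less than" over (str, size, HashValue); with
// <tuple> it could equivalently be written as
//     std::tie(left.str, left.size, left.HashValue) <
//     std::tie(right.str, right.size, right.HashValue)
// and it gives fileKey the strict weak ordering std::map requires.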
typedef std::map<fileKey,int> fileMap;
fileMap fmap;
typedef std::map<string,int> Map;
Map dmap;
Map fileCMap;
Map divMap;
queue <string> dirList;
queue <fileKey> fList;
deque <string> output;
deque <string> outputF;
set <string> finalDir;
set <string> FS1;
set <string> FS2;
int divergent=0;
int n;
int loopVar=1;
int threshold;
int fileCounter=0;
int exact=1;
extern Map threadMap;
extern deque <string> dirContent;
extern deque <string> dirContent1;
extern int dirCount;
#pragma omp threadprivate(threadMap)
Map threadMap;
#pragma omp threadprivate(dirContent)
deque <string> dirContent;
void* dirSubtree(void* t);
void* compareFiles(void* rootname);
void* compareDir(void* rootname);
void helperResult();
void reportResults(); |
pi_spmd_final.c | /**
* NAME: PI SPMD final version without false sharing
*
* This program will numerically compute the integral of
* 4/(1+x*x)
* from 0 to 1. The value of this integral is pi -- which
* is great since it gives us an easy way to check the answer.
*
* The program was parallelized using OpenMP and an SPMD
* algorithm. The following OpenMP specific lines were
* added:
*
* (1) A line to include omp.h -- the include file that
* contains OpenMP's function prototypes and constants.
* (2) A pragma that tells OpenMP to create a team of threads
* with an integer variable i being created for each thread.
* (3) two function calls: one to get the thread ID (ranging
* from 0 to one less than the number of threads), and the other
* returning the total number of threads.
* (4) A "single" construct so only one thread prints the number
* of threads.
* (5) A cyclic distribution of the loop by changing loop control
* expressions to run from the thread ID incremented by the number
* of threads. Local sums accumulated into a private scalar.
* (6) A barrier to make sure everyone's done.
* (7) A critical construct so each thread adds its local sum
* into the single global sum, one thread at a time.
*
* Note that this program avoids the false sharing problem
* by storing partial sums into a private scalar.
*
* History: Written by Tim Mattson, 11/99.
**/
#include <stdio.h>
#include <omp.h>
#define MAX_THREADS 4
static long num_steps = 100000000;
double step;
int main()
{
int i, j;
double pi, full_sum = 0.0;
double start_time, run_time;
step = 1.0 / (double)num_steps;
for (j = 1; j <= MAX_THREADS; j++)
{
omp_set_num_threads(j);
full_sum = 0.0;
start_time = omp_get_wtime();
#pragma omp parallel private(i)
{
int id = omp_get_thread_num();
int numthreads = omp_get_num_threads();
double x;
double partial_sum = 0;
#pragma omp single
printf(" num_threads = %d", numthreads);
for (i = id; i < num_steps; i += numthreads)
{
x = (i + 0.5) * step;
partial_sum += 4.0 / (1.0 + x * x);
}
#pragma omp critical
full_sum += partial_sum;
}
pi = step * full_sum;
run_time = omp_get_wtime() - start_time;
printf("\n pi is %f in %f seconds %d threds \n ", pi, run_time, j);
}
return 0;
}
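/* Build/run sketch (assumed binary name):
 *   gcc -fopenmp pi_spmd_final.c -o pi_spmd && ./pi_spmd
 * Each pass re-runs the integration with 1..MAX_THREADS threads, so the
 * printed times show how the runtime scales with the thread count.
 */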
|
GB_unaryop__lnot_int8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int8_int16
// op(A') function: GB_tran__lnot_int8_int16
// C type: int8_t
// A type: int16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_int8_int16
(
int8_t *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_int8_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__one_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__one_uint32_uint32
// op(A') function: GB_tran__one_uint32_uint32
// C type: uint32_t
// A type: uint32_t
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__one_uint32_uint32
(
uint32_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__one_uint32_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_reduce_panel.c | //------------------------------------------------------------------------------
// GB_reduce_panel: s=reduce(A), reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Reduce a matrix to a scalar using a panel-based method for built-in
// operators. No typecasting is performed. A must be sparse, hypersparse,
// or full (it cannot be bitmap). A cannot have any zombies. If A has zombies
// or is bitmap, GB_reduce_to_scalar_template is used instead.
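// In outline, the panel method replaces the serial recurrence
//      s = op (s, Ax [p]) for p = 0..anz-1
// with GB_PANEL independent accumulators,
//      Panel [k] = op (Panel [k], Ax [p+k]) ,
// which the compiler can vectorize, followed by one short pass that
// folds the Panel back into the scalar s (or into t per task below).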
{
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ;
int64_t anz = GB_NNZ (A) ;
ASSERT (anz > 0) ;
ASSERT (!GB_IS_BITMAP (A)) ;
ASSERT (A->nzombies == 0) ;
#if GB_IS_ANY_MONOID
// the ANY monoid can take any entry, and terminate immediately
s = Ax [anz-1] ;
#else
//--------------------------------------------------------------------------
// reduce A to a scalar
//--------------------------------------------------------------------------
if (nthreads == 1)
{
//----------------------------------------------------------------------
// load the Panel with the first entries
//----------------------------------------------------------------------
GB_ATYPE Panel [GB_PANEL] ;
int64_t first_panel_size = GB_IMIN (GB_PANEL, anz) ;
for (int64_t k = 0 ; k < first_panel_size ; k++)
{
Panel [k] = Ax [k] ;
}
#if GB_HAS_TERMINAL
int panel_count = 0 ;
#endif
//----------------------------------------------------------------------
// reduce all entries to the Panel
//----------------------------------------------------------------------
for (int64_t p = GB_PANEL ; p < anz ; p += GB_PANEL)
{
if (p + GB_PANEL > anz)
{
// last partial panel
for (int64_t k = 0 ; k < anz-p ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
}
else
{
// whole panel
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
#if GB_HAS_TERMINAL
panel_count-- ;
if (panel_count <= 0)
{
// check for early exit only every 256 panels
panel_count = 256 ;
int count = 0 ;
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
count += (Panel [k] == GB_TERMINAL_VALUE) ;
}
if (count > 0)
{
break ;
}
}
#endif
}
}
//----------------------------------------------------------------------
// s = reduce (Panel)
//----------------------------------------------------------------------
s = Panel [0] ;
for (int64_t k = 1 ; k < first_panel_size ; k++)
{
// s = op (s, Panel [k]) ;
GB_ADD_ARRAY_TO_SCALAR (s, Panel, k) ;
}
}
else
{
//----------------------------------------------------------------------
// all tasks share a single early_exit flag
//----------------------------------------------------------------------
// If this flag gets set, all tasks can terminate early
#if GB_HAS_TERMINAL
bool early_exit = false ;
#endif
//----------------------------------------------------------------------
// each thread reduces its own slice in parallel
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the work for this task
//------------------------------------------------------------------
// Task tid reduces Ax [pstart:pend-1] to the scalar W [tid]
int64_t pstart, pend ;
GB_PARTITION (pstart, pend, anz, tid, ntasks) ;
GB_ATYPE t = Ax [pstart] ;
//------------------------------------------------------------------
// skip this task if the terminal value has already been reached
//------------------------------------------------------------------
#if GB_HAS_TERMINAL
// check if another task has called for an early exit
bool my_exit ;
GB_ATOMIC_READ
my_exit = early_exit ;
if (!my_exit)
#endif
//------------------------------------------------------------------
// do the reductions for this task
//------------------------------------------------------------------
{
//--------------------------------------------------------------
// load the Panel with the first entries
//--------------------------------------------------------------
GB_ATYPE Panel [GB_PANEL] ;
int64_t my_anz = pend - pstart ;
int64_t first_panel_size = GB_IMIN (GB_PANEL, my_anz) ;
for (int64_t k = 0 ; k < first_panel_size ; k++)
{
Panel [k] = Ax [pstart + k] ;
}
#if GB_HAS_TERMINAL
int panel_count = 0 ;
#endif
//--------------------------------------------------------------
// reduce all entries to the Panel
//--------------------------------------------------------------
for (int64_t p = pstart + GB_PANEL ; p < pend ; p += GB_PANEL)
{
if (p + GB_PANEL > pend)
{
// last partial panel
for (int64_t k = 0 ; k < pend-p ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
}
else
{
// whole panel
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
// Panel [k] = op (Panel [k], Ax [p+k]) ;
GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
}
#if GB_HAS_TERMINAL
panel_count-- ;
if (panel_count <= 0)
{
// check for early exit only every 256 panels
panel_count = 256 ;
int count = 0 ;
for (int64_t k = 0 ; k < GB_PANEL ; k++)
{
count += (Panel [k] == GB_TERMINAL_VALUE) ;
}
if (count > 0)
{
break ;
}
}
#endif
}
}
//--------------------------------------------------------------
// t = reduce (Panel)
//--------------------------------------------------------------
t = Panel [0] ;
for (int64_t k = 1 ; k < first_panel_size ; k++)
{
// t = op (t, Panel [k]) ;
GB_ADD_ARRAY_TO_SCALAR (t, Panel, k) ;
}
#if GB_HAS_TERMINAL
if (t == GB_TERMINAL_VALUE)
{
// tell all other tasks to exit early
GB_ATOMIC_WRITE
early_exit = true ;
}
#endif
}
//------------------------------------------------------------------
// save the results of this task
//------------------------------------------------------------------
W [tid] = t ;
}
//----------------------------------------------------------------------
// sum up the results of each slice using a single thread
//----------------------------------------------------------------------
s = W [0] ;
for (int tid = 1 ; tid < ntasks ; tid++)
{
// s = op (s, W [tid]), no typecast
GB_ADD_ARRAY_TO_SCALAR (s, W, tid) ;
}
}
#endif
}
|
test.c | #include <stdio.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#define N 100
int test_aligned(){
int a[N], aa[N];
int i, error = 0;
// initialize
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
int *b = a;
// offload
#pragma omp target map(tofrom: b[0:100])
{
#pragma omp teams
#pragma omp distribute simd aligned(b: 8*sizeof(int))
for(int k=0; k<N; k++)
b[k] = k;
}
// host
for(i=0; i<N; i++)
aa[i] = i;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return error;
}
}
return error;
}
int test_collapsed(){
int a[N], aa[N];
int i, error = 0;
// initialize
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
// offload
#pragma omp target map(tofrom: a[0:100])
{
#pragma omp teams
#pragma omp distribute simd collapse(2)
for(int k=0; k<N/4; k++)
for(int l=0; l<4; l++)
a[k*4+l] = k*4+l;
}
// host
for(i=0; i<N; i++)
aa[i] = i;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return error;
}
}
return error;
}
int test_lastprivate(){
int a[N], aa[N];
int i, error = 0;
// initialize
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
// offload
#pragma omp target map(tofrom: a[0:100])
{
#pragma omp teams num_teams(1)
{
int n;
#pragma omp distribute simd lastprivate(n)
for(int k=0; k<N; k++) {
a[k] = k;
n = k;
}
a[0] = n;
}
}
// host
for(i=0; i<N; i++)
aa[i] = i;
aa[0] = N-1;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return error;
}
}
return error;
}
int test_linear(){
int a[N], aa[N];
int i, error = 0;
// initialize
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
int l = 0;
// offload
#pragma omp target map(tofrom: a[0:100])
{
#pragma omp teams num_teams(1)
// linear(l: 2) declares the step explicitly (assumed intent of this
// "linear" test); without it the shared write to l would race.
#pragma omp distribute simd linear(l: 2)
for(int k=0; k<N; k++) {
l = 2*k;
a[k] = l;
}
}
// host
for(i=0; i<N; i++)
aa[i] = 2*i;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return error;
}
}
return error;
}
int test_private(){
int a[N], aa[N];
int i, error = 0;
// initialize
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
int n;
// offload
#pragma omp target map(tofrom: a[0:100])
{
#pragma omp teams
#pragma omp distribute simd private(n)
for(int k=0; k<N; k++) {
n = k;
a[k] = n;
}
}
// host
for(i=0; i<N; i++)
aa[i] = i;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return error;
}
}
return error;
}
int test_safelen(){
int a[N], aa[N];
int i, error = 0;
// initialize
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
// offload
#pragma omp target map(tofrom: a[0:100])
{
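// safelen(2) is valid here because a[k] reads a[k-2], a loop-carried
// dependence of distance 2, so SIMD chunks of width <= 2 preserve it.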
#pragma omp teams num_teams(1)
#pragma omp distribute simd safelen(2)
for(int k=0; k<100; k++) {
if (k > 1){
a[k] = a[k-2] + 2;
}
else{
a[k] = k;
}
}
}
// host
for(i=0; i<N; i++)
aa[i] = i;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return error;
}
}
return error;
}
int main()
{
int error = 0;
check_offloading();
// Clauses
error += test_aligned();
error += test_collapsed();
error += test_lastprivate();
error += test_linear();
error += test_private();
error += test_safelen();
// report
printf("done with %d errors\n", error);
return error;
}
|
GB_unaryop__identity_fp64_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp64_int8
// op(A') function: GB_tran__identity_fp64_int8
// C type: double
// A type: int8_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_fp64_int8
(
double *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_fp64_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB019-plusplus-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Race condition on outLen due to unprotected writes.
Adding private(outLen) would avoid the race condition, but it is semantically wrong.
Data race pairs: we allow two pairs to preserve the original code pattern.
1. outLen@72:12 vs. outLen@72:12
2. output[]@72:5 vs. output[]@72:5
*/
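/*
Illustration (not part of the original benchmark): assuming the order of the
elements in output[] does not matter, the race could be removed by letting
each thread claim an index atomically, e.g.

#pragma omp parallel for
for (i=0; i<inLen; ++i) {
int idx;
#pragma omp atomic capture
idx = outLen++;
output[idx] = input[i];
}
*/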
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int i ;
int inLen=1000 ;
int outLen = 0;
if (argc>1)
inLen= atoi(argv[1]);
int input[inLen];
int output[inLen];
for (i=0; i<inLen; ++i)
input[i]=i;
#pragma omp parallel for
for (i=0; i<inLen; ++i) {
output[outLen++] = input[i] ;
}
printf("output[0]=%d\n", output[0]);
return 0;
}
|
GB_unop__identity_uint8_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint8_uint16
// op(A') function: GB_unop_tran__identity_uint8_uint16
// C type: uint8_t
// A type: uint16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
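// (0 here because uint16_t -> uint8_t requires a typecast, so the GB_memcpy
// fast path in GB_unop_apply below is not taken)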
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_uint8_uint16
(
uint8_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_uint8_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
symm_c_coo_n_lo_row_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#define CACHELINE 64
alphasparse_status_t
ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT m = mat->rows;
ALPHA_INT n = columns;
ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT i = 0; i < mat->rows; i++)
for (ALPHA_INT j = 0; j < columns; j++)
alpha_mul(y[i * ldy + j], y[i * ldy + j], beta);
ALPHA_INT block_size = CACHELINE / sizeof(ALPHA_Number);
ALPHA_INT block_num = (columns + block_size - 1) / block_size;
if (num_threads > block_num)
num_threads = block_num;
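/* e.g. with 8-byte values, block_size = 64/8 = 8 columns, so each block spans
one cache line of y and threads write disjoint cache lines (no false sharing) */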
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
{
ALPHA_INT tid = alpha_get_thread_id();
ALPHA_INT bcl = cross_block_low(tid, num_threads, block_num) * block_size;
ALPHA_INT bch = cross_block_high(tid, num_threads, block_num) * block_size;
if (bch > columns)
bch = columns;
for (ALPHA_INT ai = 0; ai < mat->nnz; ai++)
{
ALPHA_INT ac = mat->col_indx[ai];
ALPHA_INT r = mat->row_indx[ai];
if (ac < r)
{
ALPHA_Number val;
alpha_mul_3c(val, alpha, mat->values[ai]);
for (ALPHA_INT c = bcl; c < bch; ++c)
alpha_madde(y[index2(r, c, ldy)], val, x[index2(ac, c, ldx)]);
for (ALPHA_INT c = bcl; c < bch; ++c)
alpha_madde(y[index2(ac, c, ldy)], val, x[index2(r, c, ldx)]);
}
else if (ac == r)
{
ALPHA_Number val;
alpha_mul_3c(val, alpha, mat->values[ai]);
for (ALPHA_INT c = bcl; c < bch; ++c)
alpha_madde(y[index2(r, c, ldy)], val, x[index2(ac, c, ldx)]);
}
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
subopt.c | /*
* suboptimal folding - Stefan Wuchty, Walter Fontana & Ivo Hofacker
*
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <math.h>
#include "ViennaRNA/fold.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/utils/strings.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/datastructures/lists.h"
#include "ViennaRNA/eval.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/cofold.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/alphabet.h"
#include "ViennaRNA/subopt.h"
/* hack */
#include "ViennaRNA/color_output.inc"
#ifdef _OPENMP
#include <omp.h>
#endif
#define true 1
#define false 0
#ifndef ON_SAME_STRAND
#define ON_SAME_STRAND(I, J, C) (((I) >= (C)) || ((J) < (C)))
#endif
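/* true iff positions I and J (I <= J) are not separated by the cut point C:
either both lie at or beyond C, or both lie before it */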
/**
* @brief Sequence interval stack element used in subopt.c
*/
typedef struct INTERVAL {
int i;
int j;
int array_flag;
} INTERVAL;
typedef struct {
char *structure;
LIST *Intervals;
int partial_energy;
int is_duplex;
/* int best_energy; */ /* best attainable energy */
} STATE;
typedef struct {
LIST *Intervals;
LIST *Stack;
int nopush;
} subopt_env;
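/* nopush is set by scan_interval() before refining a state and cleared
whenever a derived state is pushed; if it is still set at the end, the
unmodified state is pushed back so it can mature into a solution */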
struct old_subopt_dat {
unsigned long max_sol;
unsigned long n_sol;
SOLUTION *SolutionList;
FILE *fp;
int cp;
};
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
PUBLIC int subopt_sorted = 0; /* output sorted by energy */
PUBLIC int density_of_states[MAXDOS + 1];
PUBLIC double print_energy = 9999; /* printing threshold for use with logML */
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
/* some backward compatibility stuff */
PRIVATE int backward_compat = 0;
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
PRIVATE SOLUTION *
wrap_subopt(char *seq,
char *structure,
vrna_param_t *parameters,
int delta,
int is_constrained,
int is_circular,
FILE *fp);
#endif
PRIVATE void
make_pair(int i,
int j,
STATE *state);
/* mark a gquadruplex in the resulting dot-bracket structure */
PRIVATE void
make_gquad(int i,
int L,
int l[3],
STATE *state);
PRIVATE INTERVAL *
make_interval(int i,
int j,
int ml);
PRIVATE STATE *
make_state(LIST *Intervals,
char *structure,
int partial_energy,
int is_duplex,
int length);
PRIVATE STATE *
copy_state(STATE *state);
PRIVATE void
print_state(STATE *state);
PRIVATE void
UNUSED print_stack(LIST *list);
PRIVATE LIST *
make_list(void);
PRIVATE void
push(LIST *list,
void *data);
PRIVATE void
*pop(LIST *list);
PRIVATE int
best_attainable_energy(vrna_fold_compound_t *vc,
STATE *state);
PRIVATE void
scan_interval(vrna_fold_compound_t *vc,
int i,
int j,
int array_flag,
int threshold,
STATE *state,
subopt_env *env);
PRIVATE void
free_interval_node(INTERVAL *node);
PRIVATE void
free_state_node(STATE *node);
PRIVATE void
push_back(LIST *Stack,
STATE *state);
PRIVATE char *
get_structure(STATE *state);
PRIVATE int
compare(const void *solution1,
const void *solution2);
PRIVATE int
compare_en(const void *solution1,
const void *solution2);
PRIVATE void
make_output(SOLUTION *SL,
int cp,
FILE *fp);
PRIVATE void
repeat(vrna_fold_compound_t *vc,
int i,
int j,
STATE *state,
int part_energy,
int temp_energy,
int best_energy,
int threshold,
subopt_env *env);
PRIVATE void
repeat_gquad(vrna_fold_compound_t *vc,
int i,
int j,
STATE *state,
int part_energy,
int temp_energy,
int best_energy,
int threshold,
subopt_env *env);
PRIVATE void
old_subopt_print(const char *structure,
float energy,
void *data);
PRIVATE void
old_subopt_store(const char *structure,
float energy,
void *data);
PRIVATE void
old_subopt_store_compressed(const char *structure,
float energy,
void *data);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/*---------------------------------------------------------------------------*/
/*List routines--------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
PRIVATE void
make_pair(int i,
int j,
STATE *state)
{
state->structure[i - 1] = '(';
state->structure[j - 1] = ')';
}
PRIVATE void
make_gquad(int i,
int L,
int l[3],
STATE *state)
{
int x;
for (x = 0; x < L; x++) {
state->structure[i - 1 + x] = '+';
state->structure[i - 1 + x + L + l[0]] = '+';
state->structure[i - 1 + x + 2 * L + l[0] + l[1]] = '+';
state->structure[i - 1 + x + 3 * L + l[0] + l[1] + l[2]] = '+';
}
}
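/* illustration (not in the original source): for i = 1, L = 2 and l = {1,1,1}
the four G-runs start at positions 1, 4, 7 and 10, yielding the pattern
"++.++.++.++" in a structure of length >= 11 */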
/*---------------------------------------------------------------------------*/
PRIVATE INTERVAL *
make_interval(int i,
int j,
int array_flag)
{
INTERVAL *interval;
interval = lst_newnode(sizeof(INTERVAL));
interval->i = i;
interval->j = j;
interval->array_flag = array_flag;
return interval;
}
/*---------------------------------------------------------------------------*/
PRIVATE void
free_interval_node(INTERVAL *node)
{
lst_freenode(node);
}
/*---------------------------------------------------------------------------*/
PRIVATE void
free_state_node(STATE *node)
{
free(node->structure);
if (node->Intervals)
lst_kill(node->Intervals, lst_freenode);
lst_freenode(node);
}
/*---------------------------------------------------------------------------*/
PRIVATE STATE *
make_state(LIST *Intervals,
char *structure,
int partial_energy,
int is_duplex,
int length)
{
STATE *state;
state = lst_newnode(sizeof(STATE));
if (Intervals)
state->Intervals = Intervals;
else
state->Intervals = lst_init();
if (structure) {
state->structure = structure;
} else {
int i;
state->structure = (char *)vrna_alloc(length + 1);
for (i = 0; i < length; i++)
state->structure[i] = '.';
}
state->partial_energy = partial_energy;
return state;
}
/*---------------------------------------------------------------------------*/
PRIVATE STATE *
copy_state(STATE *state)
{
STATE *new_state;
void *after;
INTERVAL *new_interval, *next;
new_state = lst_newnode(sizeof(STATE));
new_state->Intervals = lst_init();
new_state->partial_energy = state->partial_energy;
/* new_state->best_energy = state->best_energy; */
if (state->Intervals->count) {
after = LST_HEAD(new_state->Intervals);
for (next = lst_first(state->Intervals); next; next = lst_next(next)) {
new_interval = lst_newnode(sizeof(INTERVAL));
*new_interval = *next;
lst_insertafter(new_state->Intervals, new_interval, after);
after = new_interval;
}
}
new_state->structure = strdup(state->structure);
if (!new_state->structure)
vrna_message_error("out of memory");
return new_state;
}
/*---------------------------------------------------------------------------*/
/*@unused @*/ PRIVATE void
print_state(STATE *state)
{
INTERVAL *next;
if (state->Intervals->count) {
printf("%d intervals:\n", state->Intervals->count);
for (next = lst_first(state->Intervals); next; next = lst_next(next))
printf("[%d,%d],%d ", next->i, next->j, next->array_flag);
printf("\n");
}
printf("partial structure: %s\n", state->structure);
printf("\n");
printf(" partial_energy: %d\n", state->partial_energy);
/* printf(" best_energy: %d\n", state->best_energy); */
(void)fflush(stdout);
}
/*---------------------------------------------------------------------------*/
/*@unused @*/ PRIVATE void
print_stack(LIST *list)
{
void *rec;
printf("================\n");
printf("%d states\n", list->count);
for (rec = lst_first(list); rec; rec = lst_next(rec)) {
printf("state-----------\n");
print_state(rec);
}
printf("================\n");
}
/*---------------------------------------------------------------------------*/
PRIVATE LIST *
make_list(void)
{
return lst_init();
}
/*---------------------------------------------------------------------------*/
PRIVATE void
push(LIST *list,
void *data)
{
lst_insertafter(list, data, LST_HEAD(list));
}
/* PRIVATE void */
/* push_stack(STATE *state) { */ /* keep the stack sorted by energy */
/* STATE *after, *next; */
/* nopush = false; */
/* next = after = LST_HEAD(Stack); */
/* while ( next = lst_next(next)) { */
/* if ( next->best_energy >= state->best_energy ) break; */
/* after = next; */
/* } */
/* lst_insertafter(Stack, state, after); */
/* } */
/*---------------------------------------------------------------------------*/
PRIVATE void *
pop(LIST *list)
{
void *data;
data = lst_deletenext(list, LST_HEAD(list));
return data;
}
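/* push() inserts right after the list head and pop() removes the node right
after it, so together they give LIFO (stack) behaviour on a LIST */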
/*---------------------------------------------------------------------------*/
/*auxiliary routines---------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
PRIVATE int
best_attainable_energy(vrna_fold_compound_t *vc,
STATE *state)
{
/* evaluation of best possible energy attainable within remaining intervals */
register int sum;
INTERVAL *next;
vrna_md_t *md;
vrna_mx_mfe_t *matrices;
int *indx;
md = &(vc->params->model_details);
matrices = vc->matrices;
indx = vc->jindx;
sum = state->partial_energy; /* energy of already found elements */
for (next = lst_first(state->Intervals); next; next = lst_next(next)) {
if (next->array_flag == 0)
sum += (md->circ) ? matrices->Fc : matrices->f5[next->j];
else if (next->array_flag == 1)
sum += matrices->fML[indx[next->j] + next->i];
else if (next->array_flag == 2)
sum += matrices->c[indx[next->j] + next->i];
else if (next->array_flag == 3)
sum += matrices->fM1[indx[next->j] + next->i];
else if (next->array_flag == 4)
sum += matrices->fc[next->i];
else if (next->array_flag == 5)
sum += matrices->fc[next->j];
else if (next->array_flag == 6)
sum += matrices->ggg[indx[next->j] + next->i];
}
return sum;
}
/*---------------------------------------------------------------------------*/
PRIVATE void
push_back(LIST *Stack,
STATE *state)
{
push(Stack, copy_state(state));
return;
}
/*---------------------------------------------------------------------------*/
PRIVATE char *
get_structure(STATE *state)
{
char *structure;
structure = strdup(state->structure);
return structure;
}
/*---------------------------------------------------------------------------*/
PRIVATE int
compare(const void *solution1,
const void *solution2)
{
if (((SOLUTION *)solution1)->energy > ((SOLUTION *)solution2)->energy)
return 1;
if (((SOLUTION *)solution1)->energy < ((SOLUTION *)solution2)->energy)
return -1;
return strcmp(((SOLUTION *)solution1)->structure,
((SOLUTION *)solution2)->structure);
}
PRIVATE int
compare_en(const void *solution1,
const void *solution2)
{
if (((SOLUTION *)solution1)->energy > ((SOLUTION *)solution2)->energy)
return 1;
if (((SOLUTION *)solution1)->energy < ((SOLUTION *)solution2)->energy)
return -1;
return 0;
}
/*---------------------------------------------------------------------------*/
PRIVATE void
make_output(SOLUTION *SL,
int cp,
FILE *fp) /* print all solutions in SL to fp */
{
SOLUTION *sol;
for (sol = SL; sol->structure != NULL; sol++) {
char *e_string = vrna_strdup_printf(" %6.2f", sol->energy);
char *ss = vrna_db_unpack(sol->structure);
char *s = vrna_cut_point_insert(ss, cp);
print_structure(fp, s, e_string);
free(s);
free(ss);
free(e_string);
}
}
PRIVATE STATE *
derive_new_state(int i,
int j,
STATE *s,
int e,
int flag)
{
STATE *s_new = copy_state(s);
INTERVAL *ival = make_interval(i, j, flag);
push(s_new->Intervals, ival);
s_new->partial_energy += e;
return s_new;
}
PRIVATE void
fork_state(int i,
int j,
STATE *s,
int e,
int flag,
subopt_env *env)
{
STATE *s_new = derive_new_state(i, j, s, e, flag);
push(env->Stack, s_new);
env->nopush = false;
}
PRIVATE void
fork_int_state(int i,
int j,
int p,
int q,
STATE *s,
int e,
subopt_env *env)
{
STATE *s_new = derive_new_state(p, q, s, e, 2);
make_pair(i, j, s_new);
make_pair(p, q, s_new);
push(env->Stack, s_new);
env->nopush = false;
}
PRIVATE void
fork_state_pair(int i,
int j,
STATE *s,
int e,
subopt_env *env)
{
STATE *new_state;
new_state = copy_state(s);
make_pair(i, j, new_state);
new_state->partial_energy += e;
push(env->Stack, new_state);
env->nopush = false;
}
PRIVATE void
fork_two_states_pair(int i,
int j,
int k,
STATE *s,
int e,
int flag1,
int flag2,
subopt_env *env)
{
INTERVAL *interval1, *interval2;
STATE *new_state;
new_state = copy_state(s);
interval1 = make_interval(i + 1, k - 1, flag1);
interval2 = make_interval(k, j - 1, flag2);
if (k - i < j - k) {
/* push larger interval first */
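/* (push() prepends, so the interval pushed last is popped first;
pushing the smaller interval first processes the larger one first) */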
push(new_state->Intervals, interval1);
push(new_state->Intervals, interval2);
} else {
push(new_state->Intervals, interval2);
push(new_state->Intervals, interval1);
}
make_pair(i, j, new_state);
new_state->partial_energy += e;
push(env->Stack, new_state);
env->nopush = false;
}
PRIVATE void
fork_two_states(int i,
int j,
int p,
int q,
STATE *s,
int e,
int flag1,
int flag2,
subopt_env *env)
{
INTERVAL *interval1, *interval2;
STATE *new_state;
new_state = copy_state(s);
interval1 = make_interval(i, j, flag1);
interval2 = make_interval(p, q, flag2);
if ((j - i) < (q - p)) {
push(new_state->Intervals, interval1);
push(new_state->Intervals, interval2);
} else {
push(new_state->Intervals, interval2);
push(new_state->Intervals, interval1);
}
new_state->partial_energy += e;
push(env->Stack, new_state);
env->nopush = false;
}
/*---------------------------------------------------------------------------*/
/* start of subopt backtracking ---------------------------------------------*/
/*---------------------------------------------------------------------------*/
PUBLIC SOLUTION *
vrna_subopt(vrna_fold_compound_t *vc,
int delta,
int sorted,
FILE *fp)
{
struct old_subopt_dat data;
vrna_subopt_callback *cb;
data.SolutionList = NULL;
data.max_sol = 128;
data.n_sol = 0;
data.fp = fp;
data.cp = vc->cutpoint;
if (vc) {
/* SolutionList stores the suboptimal structures found */
data.SolutionList = (SOLUTION *)vrna_alloc(data.max_sol * sizeof(SOLUTION));
/* end initialize ------------------------------------------------------- */
if (fp) {
float min_en;
char *SeQ, *energies = NULL;
if (vc->strands > 1)
min_en = vrna_mfe_dimer(vc, NULL);
else
min_en = vrna_mfe(vc, NULL);
SeQ = vrna_cut_point_insert(vc->sequence, vc->cutpoint);
energies = vrna_strdup_printf(" %6.2f %6.2f", min_en, (float)delta / 100.);
print_structure(fp, SeQ, energies);
free(SeQ);
free(energies);
vrna_mx_mfe_free(vc);
}
cb = old_subopt_store;
if (fp)
cb = (sorted) ? old_subopt_store_compressed : old_subopt_print;
/* call subopt() */
vrna_subopt_cb(vc, delta, cb, (void *)&data);
if (sorted) {
/* sort structures by energy */
if (data.n_sol > 0) {
int (*compare_fun)(const void *a, const void *b);
switch (sorted) {
case VRNA_SORT_BY_ENERGY_ASC:
compare_fun = compare_en;
break;
default: /* a.k.a. VRNA_SORT_BY_ENERGY_LEXICOGRAPHIC_ASC */
compare_fun = compare;
break;
}
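/* sort only the first n_sol - 1 entries: the last entry is the
NULL-structure terminator appended by the storage callback */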
qsort(data.SolutionList, data.n_sol - 1, sizeof(SOLUTION), compare_fun);
}
if (fp)
make_output(data.SolutionList, vc->cutpoint, fp);
}
if (fp) {
/* we've printed everything -- free solutions */
SOLUTION *sol;
for (sol = data.SolutionList; sol->structure != NULL; sol++)
free(sol->structure);
free(data.SolutionList);
data.SolutionList = NULL;
}
}
return data.SolutionList;
}
PUBLIC void
vrna_subopt_cb(vrna_fold_compound_t *vc,
int delta,
vrna_subopt_callback *cb,
void *data)
{
subopt_env *env;
STATE *state;
INTERVAL *interval;
unsigned int *so, *ss, *se;
int maxlevel, count, partial_energy, old_dangles, logML, dangle_model, length, circular,
threshold;
double structure_energy, min_en, eprint;
char *struc, *structure;
float correction;
vrna_param_t *P;
vrna_md_t *md;
int minimal_energy;
int Fc;
int *f5;
vrna_fold_compound_prepare(vc, VRNA_OPTION_MFE | VRNA_OPTION_HYBRID);
length = vc->length;
so = vc->strand_order;
ss = vc->strand_start;
se = vc->strand_end;
P = vc->params;
md = &(P->model_details);
/* do mfe folding to fill the arrays and get the ground state energy */
/* in case dangles is neither 0 nor 2, set dangles=2 while folding */
circular = md->circ;
logML = md->logML;
old_dangles = dangle_model = md->dangles;
if (md->uniq_ML != 1) /* failsafe mechanism to enforce valid fM1 array */
md->uniq_ML = 1;
/* temporarily set dangles to 2 if necessary */
if ((md->dangles != 0) && (md->dangles != 2))
md->dangles = 2;
struc = (char *)vrna_alloc(sizeof(char) * (length + 1));
if (circular) {
min_en = vrna_mfe(vc, struc);
Fc = vc->matrices->Fc;
f5 = vc->matrices->f5;
/* restore dangle model */
md->dangles = old_dangles;
/* re-evaluate in case we're using logML etc */
min_en = vrna_eval_structure(vc, struc);
} else {
min_en = vrna_mfe_dimer(vc, struc);
f5 = vc->matrices->f5;
/* restore dangle model */
md->dangles = old_dangles;
/* re-evaluate in case we're using logML etc */
min_en = vrna_eval_structure(vc, struc);
}
free(struc);
eprint = print_energy + min_en;
correction = (min_en < 0) ? -0.1 : 0.1;
/* Initialize ------------------------------------------------------------ */
maxlevel = 0;
count = 0;
partial_energy = 0;
/* Initialize the stack ------------------------------------------------- */
minimal_energy = (circular) ? Fc : f5[length];
threshold = minimal_energy + delta;
if (threshold >= INF) {
vrna_message_warning("Energy range too high, limiting to reasonable value");
threshold = INF - EMAX;
}
/* init env data structure */
env = (subopt_env *)vrna_alloc(sizeof(subopt_env));
env->Stack = NULL;
env->nopush = true;
env->Stack = make_list(); /* anchor */
env->Intervals = make_list(); /* initial state: */
interval = make_interval(1, length, 0); /* interval [1,length,0] */
push(env->Intervals, interval);
env->nopush = false;
state = make_state(env->Intervals, NULL, partial_energy, 0, length);
/* state->best_energy = minimal_energy; */
push(env->Stack, state);
env->nopush = false;
/* end initialize ------------------------------------------------------- */
while (1) {
/* forever, until nothing remains on the stack */
maxlevel = (env->Stack->count > maxlevel ? env->Stack->count : maxlevel);
if (LST_EMPTY(env->Stack)) {
/* we are done! clean up and quit */
/* fprintf(stderr, "maxlevel: %d\n", maxlevel); */
lst_kill(env->Stack, free_state_node);
cb(NULL, 0, data); /* final call to the callback function, signalled by NULL */
break;
}
/* pop the last element ---------------------------------------------- */
state = pop(env->Stack); /* current state to work with */
if (LST_EMPTY(state->Intervals)) {
int e;
/* state has no intervals left: we got a solution */
count++;
structure = get_structure(state);
structure_energy = state->partial_energy / 100.;
#ifdef CHECK_ENERGY
structure_energy = vrna_eval_structure(vc, structure);
if (!logML)
if ((double)(state->partial_energy / 100.) != structure_energy) {
vrna_message_error("%s %6.2f %6.2f",
structure,
state->partial_energy / 100.,
structure_energy);
exit(1);
}
#endif
if (logML || (dangle_model == 1) || (dangle_model == 3)) /* recalc energy */
structure_energy = vrna_eval_structure(vc, structure);
e = (int)((structure_energy - min_en) * 10. - correction); /* avoid rounding errors */
if (e > MAXDOS)
e = MAXDOS;
density_of_states[e]++;
if (structure_energy <= eprint) {
char *outstruct = vrna_cut_point_insert(structure, (vc->strands > 1) ? ss[so[1]] : -1);
cb((const char *)outstruct, structure_energy, data);
free(outstruct);
}
free(structure);
} else {
/* get (and remove) next interval of state to analyze */
interval = pop(state->Intervals);
scan_interval(vc, interval->i, interval->j, interval->array_flag, threshold, state, env);
free_interval_node(interval); /* free the current interval */
}
free_state_node(state); /* free the current state */
} /* end of while (1) */
/* cleanup memory */
free(env);
}
PRIVATE void
scan_interval(vrna_fold_compound_t *vc,
int i,
int j,
int array_flag,
int threshold,
STATE *state,
subopt_env *env)
{
/* real backtrack routine */
/* array_flag = 0: trace back in f5-array */
/* array_flag = 1: trace back in fML-array */
/* array_flag = 2: trace back in repeat() */
/* array_flag = 3: trace back in fM1-array */
/* array_flag = 4: trace back in fc-array (5' of the cut point) */
/* array_flag = 5: trace back in fc-array (3' of the cut point) */
/* array_flag = 6: trace back a gquadruplex */
STATE *new_state, *temp_state;
INTERVAL *new_interval;
vrna_param_t *P;
vrna_md_t *md;
register int k, fi, cij, ij;
register int type;
register int dangle_model;
register int noLP;
unsigned int *sn, *so, *ss, *se;
int element_energy, best_energy;
int *fc, *f5, *c, *fML, *fM1, *ggg;
int FcH, FcI, FcM, *fM2;
int length, *indx, *rtype, circular, with_gquad, turn;
char *ptype;
short *S1;
unsigned char *hard_constraints, hc_decompose;
vrna_hc_t *hc;
vrna_sc_t *sc;
length = vc->length;
sn = vc->strand_number;
so = vc->strand_order;
ss = vc->strand_start;
se = vc->strand_end;
indx = vc->jindx;
ptype = vc->ptype;
S1 = vc->sequence_encoding;
P = vc->params;
md = &(P->model_details);
rtype = &(md->rtype[0]);
dangle_model = md->dangles;
noLP = md->noLP;
circular = md->circ;
with_gquad = md->gquad;
turn = md->min_loop_size;
fc = vc->matrices->fc;
f5 = vc->matrices->f5;
c = vc->matrices->c;
fML = vc->matrices->fML;
fM1 = vc->matrices->fM1;
ggg = vc->matrices->ggg;
FcH = vc->matrices->FcH;
FcI = vc->matrices->FcI;
FcM = vc->matrices->FcM;
fM2 = vc->matrices->fM2;
hc = vc->hc;
hard_constraints = hc->mx;
sc = vc->sc;
best_energy = best_attainable_energy(vc, state); /* .. on remaining intervals */
env->nopush = true;
if ((i > 1) && (!array_flag))
vrna_message_error("Error while backtracking!");
if ((j < i + turn + 1) &&
((sn[i] == so[1]) || (sn[j] == so[0]))) {
/* minimal structure element */
if (array_flag == 0)
/* do not forget to add f5[j], since it may contain pseudo energies from soft constraining */
state->partial_energy += f5[j];
if (env->nopush) {
push_back(env->Stack, state);
env->nopush = false;
}
return;
}
ij = indx[j] + i;
/* 13131313131313131313131313131313131313131313131313131313131313131313131 */
if (array_flag == 3 || array_flag == 1) {
/* array_flag = 3: interval i,j was generated during */
/* a multiloop decomposition using array fM1 in repeat() */
/* or in this block */
/* array_flag = 1: interval i,j was generated from a */
/* stack, bulge, or internal loop in repeat() */
/* or in this block */
if ((hc->up_ml[j]) &&
(((array_flag == 3) && (fM1[indx[j - 1] + i] != INF)) ||
(fML[indx[j - 1] + i] != INF))) {
if (array_flag == 3)
fi = fM1[indx[j - 1] + i] + P->MLbase;
else
fi = fML[indx[j - 1] + i] + P->MLbase;
if (sc) {
if (sc->energy_up)
fi += sc->energy_up[j][1];
if (sc->f)
fi += sc->f(i, j, i, j - 1, VRNA_DECOMP_ML_ML, sc->data);
}
if ((fi + best_energy <= threshold) && (sn[j - 1] == sn[j])) {
/* no basepair, nibbling of 3'-end */
if ((sc) &&
(sc->energy_up))
fork_state(i, j - 1, state, P->MLbase + sc->energy_up[j][1], array_flag, env);
else
fork_state(i, j - 1, state, P->MLbase, array_flag, env);
}
}
hc_decompose = hard_constraints[length * i + j];
if (hc_decompose & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) {
/* i,j may pair */
cij = c[ij];
if (cij != INF) {
type = vrna_get_ptype(ij, ptype);
switch (dangle_model) {
case 0:
element_energy = E_MLstem(type, -1, -1, P);
break;
default:
element_energy = E_MLstem(type,
(((i > 1) && (sn[i - 1] == sn[i])) || circular) ? S1[i - 1] : -1,
(((j < length) && (sn[j] == sn[j + 1])) || circular) ? S1[j + 1] : -1,
P);
break;
}
if (sc) {
if (sc->f)
element_energy += sc->f(i, j, i, j, VRNA_DECOMP_ML_STEM, sc->data);
}
cij += element_energy;
if (cij + best_energy <= threshold)
repeat(vc, i, j, state, element_energy, 0, best_energy, threshold, env);
}
} else if ((with_gquad) && (ggg[ij] != INF)) {
element_energy = E_MLstem(0, -1, -1, P);
cij = ggg[ij] + element_energy;
if (cij + best_energy <= threshold)
repeat_gquad(vc, i, j, state, element_energy, 0, best_energy, threshold, env);
}
} /* array_flag == 3 || array_flag == 1 */
/* 11111111111111111111111111111111111111111111111111111111111111111111111 */
if (array_flag == 1) {
/* array_flag = 1: interval i,j was generated from a */
/* stack, bulge, or internal loop in repeat() */
/* or in this block */
int stopp, k1j;
if ((sn[i - 1] == sn[i]) && (sn[j] == sn[j + 1])) {
/* backtrack in fML only if a multiloop is possible */
for (k = i + turn + 1; k <= j - 1 - turn; k++) {
/* Multiloop decomposition if i,j contains more than 1 stack */
if ((with_gquad) &&
(sn[k] == sn[k + 1]) &&
(fML[indx[k] + i] != INF) &&
(ggg[indx[j] + k + 1] != INF)) {
element_energy = E_MLstem(0, -1, -1, P);
if (fML[indx[k] + i] + ggg[indx[j] + k + 1] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(i, k, state, 0, array_flag);
env->nopush = false;
repeat_gquad(vc,
k + 1,
j,
temp_state,
element_energy,
fML[indx[k] + i],
best_energy,
threshold,
env);
free_state_node(temp_state);
}
}
k1j = indx[j] + k + 1;
if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) &&
(fML[indx[k] + i] != INF) &&
(c[k1j] != INF)) {
short s5, s3;
type = vrna_get_ptype(k1j, ptype);
switch (dangle_model) {
case 0:
s5 = s3 = -1;
break;
default:
s5 = (sn[i - 1] == sn[i]) ? S1[k] : -1;
s3 = (sn[j] == sn[j + 1]) ? S1[j + 1] : -1;
break;
}
element_energy = E_MLstem(type, s5, s3, P);
if (sc) {
if (sc->f)
element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_ML_ML_STEM, sc->data);
}
if (sn[k] == sn[k + 1]) {
if (fML[indx[k] + i] + c[k1j] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(i, k, state, 0, array_flag);
env->nopush = false;
repeat(vc,
k + 1,
j,
temp_state,
element_energy,
fML[indx[k] + i],
best_energy,
threshold,
env);
free_state_node(temp_state);
}
}
}
}
}
if (vc->strands > 1) {
stopp = se[so[0]] - 1; /* stop at cut point - 1: k on the cut implies no multiloop */
stopp = MIN2(stopp, j - 1 - turn);
if (i > ss[so[1]])
stopp = j - 1 - turn;
else if (i == ss[so[1]])
stopp = 0; /* not a multiloop */
} else {
stopp = j - 1 - turn;
}
int up = 1;
for (k = i; k <= stopp; k++, up++) {
if (hc->up_ml[i] >= up) {
k1j = indx[j] + k + 1;
/* Multiloop decomposition if i,j contains only 1 stack */
if ((with_gquad) && (ggg[k1j] != INF)) {
element_energy = E_MLstem(0, -1, -1, P) + P->MLbase * up;
if (sc)
if (sc->energy_up)
element_energy += sc->energy_up[i][up];
if (ggg[k1j] + element_energy + best_energy <= threshold)
repeat_gquad(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env);
}
if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) &&
(c[k1j] != INF)) {
int s5, s3;
type = vrna_get_ptype(k1j, ptype);
switch (dangle_model) {
case 0:
s5 = s3 = -1;
break;
default:
s5 = (sn[k - 1] == sn[k]) ? S1[k] : -1;
s3 = (sn[j] == sn[j + 1]) ? S1[j + 1] : -1;
break;
}
element_energy = E_MLstem(type, s5, s3, P);
element_energy += P->MLbase * up;
if (sc) {
if (sc->energy_up)
element_energy += sc->energy_up[i][up];
if (sc->f)
element_energy += sc->f(i, j, k + 1, j, VRNA_DECOMP_ML_STEM, sc->data);
}
if (c[k1j] + element_energy + best_energy <= threshold)
repeat(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env);
}
}
}
} /* array_flag == 1 */
/* 22222222222222222222222222222222222222222222222222 */
/* */
/* array_flag = 2: interval i,j was generated from a */
/* stack, bulge, or internal loop in repeat() */
/* */
/* 22222222222222222222222222222222222222222222222222 */
if (array_flag == 2) {
repeat(vc, i, j, state, 0, 0, best_energy, threshold, env);
if (env->nopush)
if (!noLP)
vrna_message_warning("%d,%d\nOops, no solution in repeat!", i, j);
return;
}
/* 00000000000000000000000000000000000000000000000000 */
/* */
/* array_flag = 0: interval i,j was found while */
/* tracing back through f5-array and c-array */
/* or within this block */
/* */
/* 00000000000000000000000000000000000000000000000000 */
if ((array_flag == 0) && !circular) {
int s5, s3, kj, tmp_en;
if ((hc->up_ext[j]) &&
(f5[j - 1] != INF)) {
tmp_en = 0;
if (sc) {
if (sc->energy_up)
tmp_en += sc->energy_up[j][1];
if (sc->f)
tmp_en += sc->f(1, j, 1, j - 1, VRNA_DECOMP_EXT_EXT, sc->data);
}
if (f5[j - 1] + tmp_en + best_energy <= threshold)
/* no basepair, nibbling of 3'-end */
fork_state(i, j - 1, state, tmp_en, 0, env);
}
for (k = j - turn - 1; k > 1; k--) {
kj = indx[j] + k;
if ((with_gquad) &&
(sn[k] == sn[j]) &&
(f5[k - 1] != INF) &&
(ggg[kj] != INF)) {
element_energy = 0;
if (f5[k - 1] + ggg[kj] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(1, k - 1, state, 0, 0);
env->nopush = false;
/* backtrace the quadruplex */
repeat_gquad(vc,
k,
j,
temp_state,
element_energy,
f5[k - 1],
best_energy,
threshold,
env);
free_state_node(temp_state);
}
}
if ((hard_constraints[length * j + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(f5[k - 1] != INF) &&
(c[kj] != INF)) {
type = vrna_get_ptype(kj, ptype);
/* k and j pair */
switch (dangle_model) {
case 0:
s5 = s3 = -1;
break;
default:
s5 = (sn[k - 1] == sn[k]) ? S1[k - 1] : -1;
s3 = ((j < length) && (sn[j] == sn[j + 1])) ? S1[j + 1] : -1;
break;
}
element_energy = vrna_E_ext_stem(type, s5, s3, P);
if (sn[k] != sn[j]) /*&&(state->is_duplex==0))*/
element_energy += P->DuplexInit;
/*state->is_duplex=1;*/
if (sc) {
if (sc->f)
element_energy += sc->f(1, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data);
}
if (f5[k - 1] + c[kj] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(1, k - 1, state, 0, 0);
env->nopush = false;
repeat(vc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env);
free_state_node(temp_state);
}
}
}
kj = indx[j] + 1;
if ((with_gquad) &&
(sn[k] == sn[j]) &&
(ggg[kj] != INF)) {
element_energy = 0;
if (ggg[kj] + element_energy + best_energy <= threshold)
/* backtrace the quadruplex */
repeat_gquad(vc, 1, j, state, element_energy, 0, best_energy, threshold, env);
}
if ((hard_constraints[length + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(c[kj] != INF)) {
type = vrna_get_ptype(kj, ptype);
s5 = -1;
switch (dangle_model) {
case 0:
s3 = -1;
break;
default:
s3 = (j < length) && (sn[j] == sn[j + 1]) ? S1[j + 1] : -1;
break;
}
element_energy = vrna_E_ext_stem(type, s5, s3, P);
if (sn[1] != sn[j])
element_energy += P->DuplexInit;
if (sc) {
if (sc->f)
element_energy += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_STEM, sc->data);
}
if (c[kj] + element_energy + best_energy <= threshold)
repeat(vc, 1, j, state, element_energy, 0, best_energy, threshold, env);
}
} /* end array_flag == 0 && !circular*/
/* or do we subopt circular? */
else if (array_flag == 0) {
int k, l, p, q, tmp_en;
/* if we've done everything right, we will never reach this case more than once, */
/* right after the initialization of the stack with ([1,n], empty, 0) */
/* let's check if we can have an open chain without breaking the threshold; */
/* this is an ugly work-around because in case of an open chain we do not have to */
/* backtrack anything further... */
if (hc->up_ext[1] >= length) {
tmp_en = 0;
if (sc) {
if (sc->energy_up)
tmp_en += sc->energy_up[1][length];
if (sc->f)
tmp_en += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_UP, sc->data);
}
if (tmp_en <= threshold) {
new_state = derive_new_state(1, 2, state, 0, 0);
new_state->partial_energy = 0;
push(env->Stack, new_state);
env->nopush = false;
}
}
/* ok, let's check if we can do an exterior hairpin without breaking the threshold */
/* best energy should be 0 if we are here */
if (FcH + best_energy <= threshold) {
/* let's search for all exterior hairpin cases that fit into our threshold barrier */
/* we use index k,l to avoid confusion with i,j index of our state... */
/* if we reach here, i should be 1 and j should be n respectively */
for (k = i; k < j; k++) {
if (hc->up_hp[1] < k)
break;
for (l = j; l >= k + turn + 1; l--) {
int kl, tmpE;
kl = indx[l] + k;
if (c[kl] != INF) {
tmpE = vrna_E_hp_loop(vc, l, k);
if (c[kl] + tmpE + best_energy <= threshold) {
/* what we have to do is the following: create a new state with */
/* interval [k,l], add the loop energy as the initial energy of */
/* this state, and put the state onto the stack R for further */
/* refinement; we also mark this new interval to be scanned in C */
fork_state(k, l, state, tmpE, 2, env);
}
}
}
}
}
/* now let's see if we can do an exterior interior loop without breaking the threshold */
if (FcI + best_energy <= threshold) {
/* now we search for our exterior interior loop possibilities */
for (k = i; k < j; k++) {
for (l = j; l >= k + turn + 1; l--) {
int kl, type, tmpE;
kl = indx[l] + k; /* just confusing these indices ;-) */
if ((hard_constraints[length * k + l] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) &&
(c[kl] != INF)) {
type = rtype[vrna_get_ptype(kl, ptype)];
for (p = l + 1; p < j; p++) {
int u1, qmin;
u1 = p - l - 1;
if (u1 + k - 1 > MAXLOOP)
break;
if (hc->up_int[l + 1] < u1)
break;
qmin = u1 + k - 1 + j - MAXLOOP;
if (qmin < p + turn + 1)
qmin = p + turn + 1;
for (q = j; q >= qmin; q--) {
int u2, type_2;
if (hc->up_int[q + 1] < (j - q + k - 1))
break;
if ((hard_constraints[length * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) &&
(c[indx[q] + p] != INF)) {
type_2 = rtype[vrna_get_ptype(indx[q] + p, ptype)];
u2 = k - 1 + j - q;
if (u1 + u2 > MAXLOOP)
continue;
tmpE = E_IntLoop(u1,
u2,
type,
type_2,
S1[l + 1],
S1[k - 1],
S1[p - 1],
S1[q + 1],
P);
if (sc) {
if (sc->energy_up)
tmpE += sc->energy_up[l + 1][p - l - 1]
+ sc->energy_up[q + 1][j - q]
+ sc->energy_up[1][k - 1];
if (sc->energy_stack) {
if (u1 + u2 == 0) {
tmpE += sc->energy_stack[k]
+ sc->energy_stack[l]
+ sc->energy_stack[p]
+ sc->energy_stack[q];
}
}
}
if (c[kl] + c[indx[q] + p] + tmpE + best_energy <= threshold) {
/* ok, similar to the hairpin case, we add new states onto the stack R, */
/* but in contrast to the hairpin decomposition, we have to add two new */
/* intervals, enclosed by k,l and p,q respectively, and we also have to */
/* add the partial energy that comes from the exterior interior loop */
fork_two_states(k, l, p, q, state, tmpE, 2, 2, env);
}
}
}
}
}
}
}
}
/* and last but not least, we check whether we can do an exterior multiloop within the energy threshold */
if (FcM <= threshold) {
/* this decomposition is somewhat more complicated, so let's see what we do here... */
/* first we want to find out which split indices we can use without exceeding the threshold */
int tmpE2;
for (k = turn + 1; k < j - 2 * turn; k++) {
if ((fML[indx[k] + 1] != INF) &&
(fM2[k + 1] != INF)) {
tmpE2 = fML[indx[k] + 1] + fM2[k + 1] + P->MLclosing;
if (tmpE2 + best_energy <= threshold) {
/* grmpfh, we have found a possible split index k, so we have to split fM2 and fML now */
/* let's do it in fM2 first */
for (l = k + turn + 2; l < j - turn - 1; l++) {
tmpE2 = fM1[indx[l] + k + 1] + fM1[indx[j] + l + 1];
if (tmpE2 + fML[indx[k] + 1] + P->MLclosing <= threshold) {
/* we've (hopefully) found a valid decomposition of fM2 and therefore we have all */
/* three intervals for our new state to be pushed on stack R */
new_state = copy_state(state);
/* the first interval is to be searched in the fML array */
new_interval = make_interval(1, k, 1);
push(new_state->Intervals, new_interval);
env->nopush = false;
/* next, we have the first interval that has to be traced in fM1 */
new_interval = make_interval(k + 1, l, 3);
push(new_state->Intervals, new_interval);
env->nopush = false;
/* and the last of our three intervals is also one to be traced within fM1 array... */
new_interval = make_interval(l + 1, j, 3);
push(new_state->Intervals, new_interval);
env->nopush = false;
/* mmh, we add the energy for closing the multiloop now... */
new_state->partial_energy += P->MLclosing;
/* next we push our state onto the R stack */
push(env->Stack, new_state);
env->nopush = false;
}
/* else we search further... */
}
/* ok, we have to decompose fML now... */
}
}
}
}
} /* that's all, folks, for the circular case... */
/* 44444444444444444444444444444444444444444444444444 */
/* */
/* array_flag = 4: interval i,j was found while */
/* tracing back through the fc-array for indices smaller than cp */
/* or within this block */
/* */
/* 44444444444444444444444444444444444444444444444444 */
if (array_flag == 4) {
int ik, s5, s3, tmp_en;
if ((hc->up_ext[i]) &&
(fc[i + 1] != INF)) {
tmp_en = 0;
if (sc) {
if (sc->energy_up)
tmp_en += sc->energy_up[i][1];
if (sc->f)
tmp_en += sc->f(i, j, i + 1, j, VRNA_DECOMP_EXT_EXT, sc->data);
}
if (fc[i + 1] + tmp_en + best_energy <= threshold)
/* no basepair, nibbling of 5'-end */
fork_state(i + 1, j, state, tmp_en, 4, env);
}
for (k = i + turn + 1; k < j; k++) {
ik = indx[k] + i;
if ((with_gquad) &&
(fc[k + 1] != INF) &&
(ggg[ik] != INF)) {
if (fc[k + 1] + ggg[ik] + best_energy <= threshold) {
temp_state = derive_new_state(k + 1, j, state, 0, 4);
env->nopush = false;
repeat_gquad(vc, i, k, temp_state, 0, fc[k + 1], best_energy, threshold, env);
free_state_node(temp_state);
}
}
if ((hard_constraints[length * i + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(fc[k + 1] != INF) &&
(c[ik] != INF)) {
type = vrna_get_ptype(ik, ptype);
switch (dangle_model) {
case 0:
s5 = s3 = -1;
break;
default:
s5 = (i > 1) ? S1[i - 1] : -1;
s3 = S1[k + 1];
break;
}
element_energy = vrna_E_ext_stem(type, s5, s3, P);
if (sc) {
if (sc->f)
element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_EXT_STEM_EXT, sc->data);
}
if (fc[k + 1] + c[ik] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(k + 1, j, state, 0, 4);
env->nopush = false;
repeat(vc, i, k, temp_state, element_energy, fc[k + 1], best_energy, threshold, env);
free_state_node(temp_state);
}
}
}
ik = indx[se[so[0]]] + i; /* indx[j] + i; */
if ((with_gquad) && (ggg[ik] != INF))
if (ggg[ik] + best_energy <= threshold)
repeat_gquad(vc, i, se[so[0]], state, 0, 0, best_energy, threshold, env);
if ((hard_constraints[length * i + se[so[0]]] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(c[ik] != INF)) {
type = vrna_get_ptype(ik, ptype);
s3 = -1;
switch (dangle_model) {
case 0:
s5 = -1;
break;
default:
s5 = (i > 1) ? S1[i - 1] : -1;
break;
}
element_energy = vrna_E_ext_stem(type, s5, s3, P);
if (sc) {
if (sc->f)
element_energy += sc->f(i, se[so[0]], i, se[so[0]], VRNA_DECOMP_EXT_STEM, sc->data);
}
if (c[ik] + element_energy + best_energy <= threshold)
repeat(vc, i, se[so[0]], state, element_energy, 0, best_energy, threshold, env);
}
} /* array_flag == 4 */
/* 55555555555555555555555555555555555555555555555555 */
/* */
/* array_flag = 5: interval i,j (with i = cp) was found while */
/* tracing back through the fc-array for indices greater than cp */
/* or within this block */
/* */
/* 55555555555555555555555555555555555555555555555555 */
if (array_flag == 5) {
int kj, s5, s3, tmp_en;
if ((hc->up_ext[j]) &&
(fc[j - 1] != INF)) {
tmp_en = 0;
if (sc) {
if (sc->energy_up)
tmp_en += sc->energy_up[j][1];
if (sc->f)
tmp_en += sc->f(i, j, i, j - 1, VRNA_DECOMP_EXT_EXT, sc->data);
}
if (fc[j - 1] + tmp_en + best_energy <= threshold)
/* no basepair, nibbling of 3'-end */
fork_state(i, j - 1, state, tmp_en, 5, env);
}
for (k = j - turn - 1; k > i; k--) {
kj = indx[j] + k;
if ((with_gquad) &&
(fc[k - 1] != INF) &&
(ggg[kj] != INF)) {
if (fc[k - 1] + ggg[kj] + best_energy <= threshold) {
temp_state = derive_new_state(i, k - 1, state, 0, 5);
env->nopush = false;
repeat_gquad(vc, k, j, temp_state, 0, fc[k - 1], best_energy, threshold, env);
free_state_node(temp_state);
}
}
if ((hard_constraints[length * j + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(fc[k - 1] != INF) &&
(c[kj] != INF)) {
type = vrna_get_ptype(kj, ptype);
element_energy = 0;
switch (dangle_model) {
case 0:
s3 = s5 = -1;
break;
default:
s5 = S1[k - 1];
s3 = (j < length) ? S1[j + 1] : -1;
break;
}
element_energy = vrna_E_ext_stem(type, s5, s3, P);
if (sc) {
if (sc->f)
element_energy += sc->f(i, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data);
}
if (fc[k - 1] + c[kj] + element_energy + best_energy <= threshold) {
temp_state = derive_new_state(i, k - 1, state, 0, 5);
env->nopush = false;
repeat(vc, k, j, temp_state, element_energy, fc[k - 1], best_energy, threshold, env);
free_state_node(temp_state);
}
}
}
kj = indx[j] + ss[so[1]]; /* indx[j] + i; */
if ((with_gquad) && (ggg[kj] != INF))
if (ggg[kj] + best_energy <= threshold)
repeat_gquad(vc, ss[so[1]], j, state, 0, 0, best_energy, threshold, env);
if ((hard_constraints[length * ss[so[1]] + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(c[kj] != INF)) {
type = vrna_get_ptype(kj, ptype);
s5 = -1;
switch (dangle_model) {
case 0:
s3 = -1;
break;
default:
s3 = (j < length) ? S1[j + 1] : -1;
break;
}
element_energy = vrna_E_ext_stem(type, s5, s3, P);
if (sc) {
if (sc->f)
element_energy += sc->f(ss[so[1]], j, ss[so[1]], j, VRNA_DECOMP_EXT_STEM, sc->data);
}
if (c[kj] + element_energy + best_energy <= threshold)
repeat(vc, ss[so[1]], j, state, element_energy, 0, best_energy, threshold, env);
}
} /* array_flag == 5 */
if (array_flag == 6) {
/* we have a gquad */
repeat_gquad(vc, i, j, state, 0, 0, best_energy, threshold, env);
if (env->nopush)
vrna_message_warning("%d,%d\nOops, no solution in gquad-repeat!", i, j);
return;
}
if (env->nopush) {
push_back(env->Stack, state);
env->nopush = false;
}
return;
}
/*---------------------------------------------------------------------------*/
PRIVATE void
repeat_gquad(vrna_fold_compound_t *vc,
int i,
int j,
STATE *state,
int part_energy,
int temp_energy,
int best_energy,
int threshold,
subopt_env *env)
{
unsigned int *sn;
int *ggg, *indx, element_energy;
short *S1;
vrna_param_t *P;
indx = vc->jindx;
sn = vc->strand_number;
ggg = vc->matrices->ggg;
S1 = vc->sequence_encoding;
P = vc->params;
/* find all gquads that fit into the energy range and the interval [i,j] */
STATE *new_state;
best_energy += part_energy; /* energy of current structural element */
best_energy += temp_energy; /* energy from unpushed interval */
if (sn[i] == sn[j]) {
element_energy = ggg[indx[j] + i];
if ((element_energy != INF) &&
(element_energy + best_energy <= threshold)) {
int cnt;
int *L;
int *l;
/* find out how many gquads we might expect in the interval [i,j] */
int num_gquads = get_gquad_count(S1, i, j);
num_gquads++;
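/* one extra slot for the -1 terminator written by get_gquad_pattern_exhaustive() */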
L = (int *)vrna_alloc(sizeof(int) * num_gquads);
l = (int *)vrna_alloc(sizeof(int) * num_gquads * 3);
L[0] = -1;
get_gquad_pattern_exhaustive(S1, i, j, P, L, l, threshold - best_energy);
for (cnt = 0; L[cnt] != -1; cnt++) {
new_state = copy_state(state);
make_gquad(i, L[cnt], &(l[3 * cnt]), new_state);
new_state->partial_energy += part_energy;
new_state->partial_energy += element_energy;
/* new_state->best_energy =
* hairpin[unpaired] + element_energy + best_energy; */
push(env->Stack, new_state);
env->nopush = false;
}
free(L);
free(l);
}
}
best_energy -= part_energy;
best_energy -= temp_energy;
return;
}
PRIVATE void
repeat(vrna_fold_compound_t *vc,
int i,
int j,
STATE *state,
int part_energy,
int temp_energy,
int best_energy,
int threshold,
subopt_env *env)
{
/* routine to find stacks, bulges, internal loops and multiloops */
/* within interval closed by basepair i,j */
STATE *new_state;
vrna_param_t *P;
vrna_md_t *md;
register int ij, k, p, q, energy, new;
register int mm;
register int no_close, type, type_2;
char *ptype;
unsigned int n, *sn, *so, *ss, *se;
int element_energy;
int *fc, *c, *fML, *fM1, *ggg;
int rt, *indx, *rtype, noGUclosure, noLP, with_gquad, dangle_model, turn;
short *S1;
vrna_hc_t *hc;
vrna_sc_t *sc;
n = vc->length;
S1 = vc->sequence_encoding;
ptype = vc->ptype;
indx = vc->jindx;
sn = vc->strand_number;
so = vc->strand_order;
ss = vc->strand_start;
se = vc->strand_end;
P = vc->params;
md = &(P->model_details);
rtype = &(md->rtype[0]);
noGUclosure = md->noGUclosure;
noLP = md->noLP;
with_gquad = md->gquad;
dangle_model = md->dangles;
turn = md->min_loop_size;
fc = vc->matrices->fc;
c = vc->matrices->c;
fML = vc->matrices->fML;
fM1 = vc->matrices->fM1;
ggg = vc->matrices->ggg;
hc = vc->hc;
sc = vc->sc;
ij = indx[j] + i;
type = vrna_get_ptype(ij, ptype);
/*
* if (type==0) fprintf(stderr, "repeat: Warning: %d %d can't pair\n", i,j);
*/
no_close = (((type == 3) || (type == 4)) && noGUclosure);
if (hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) {
if (noLP) {
/* always consider the structure with additional stack */
if (i + turn + 2 < j) {
if (hc->mx[n * (i + 1) + j - 1] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) {
type_2 = rtype[vrna_get_ptype(indx[j - 1] + i + 1, ptype)];
energy = 0;
if ((sn[i] == sn[i + 1]) && (sn[j - 1] == sn[j])) {
energy = E_IntLoop(0, 0, type, type_2, S1[i + 1], S1[j - 1], S1[i + 1], S1[j - 1], P);
if (sc) {
if (sc->energy_bp)
energy += sc->energy_bp[ij];
if (sc->energy_stack) {
energy += sc->energy_stack[i]
+ sc->energy_stack[i + 1]
+ sc->energy_stack[j - 1]
+ sc->energy_stack[j];
}
if (sc->f)
energy += sc->f(i, j, i + 1, j - 1, VRNA_DECOMP_PAIR_IL, sc->data);
}
new_state = derive_new_state(i + 1, j - 1, state, part_energy + energy, 2);
make_pair(i, j, new_state);
make_pair(i + 1, j - 1, new_state);
/* new_state->best_energy = new + best_energy; */
push(env->Stack, new_state);
env->nopush = false;
if (i == 1 || state->structure[i - 2] != '(' || state->structure[j] != ')')
/* adding a stack is the only possible structure */
return;
}
}
}
}
}
best_energy += part_energy; /* energy of current structural element */
best_energy += temp_energy; /* energy from unpushed interval */
if (hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) {
for (p = i + 1; p <= MIN2(j - 2 - turn, i + MAXLOOP + 1); p++) {
int minq = j - i + p - MAXLOOP - 2;
if (minq < p + 1 + turn)
minq = p + 1 + turn;
if (hc->up_int[i + 1] < (p - i - 1))
break;
for (q = j - 1; q >= minq; q--) {
if (hc->up_int[q + 1] < (j - q - 1))
break;
/* skip stack if noLP, since we've already processed it above */
if ((noLP) && (p == i + 1) && (q == j - 1))
continue;
if (!(hc->mx[n * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC))
continue;
if (c[indx[q] + p] == INF)
continue;
type_2 = vrna_get_ptype(indx[q] + p, ptype);
if (noGUclosure)
if (no_close || (type_2 == 3) || (type_2 == 4))
if ((p > i + 1) || (q < j - 1))
continue;
/* continue unless stack */
if ((sn[i] == sn[p]) && (sn[q] == sn[j])) {
energy = E_IntLoop(p - i - 1, j - q - 1, type, rtype[type_2],
S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P);
if (sc) {
if (sc->energy_up)
energy += sc->energy_up[i + 1][p - i - 1]
+ sc->energy_up[q + 1][j - q - 1];
if (sc->energy_bp)
energy += sc->energy_bp[ij];
if (sc->energy_stack) {
if ((p == i + 1) && (q == j - 1)) {
energy += sc->energy_stack[i]
+ sc->energy_stack[p]
+ sc->energy_stack[q]
+ sc->energy_stack[j];
}
}
if (sc->f)
energy += sc->f(i, j, p, q, VRNA_DECOMP_PAIR_IL, sc->data);
}
new = energy + c[indx[q] + p];
if (new + best_energy <= threshold)
/* stack, bulge, or interior loop */
fork_int_state(i, j, p, q, state, part_energy + energy, env);
} /*end of if block */
} /* end of q-loop */
} /* end of p-loop */
}
if (sn[i] != sn[j]) {
/*look in fc*/
if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
(fc[i + 1] != INF) &&
(fc[j - 1] != INF)) {
rt = rtype[type];
element_energy = 0;
switch (dangle_model) {
case 0:
element_energy = vrna_E_ext_stem(rt, -1, -1, P);
break;
default:
element_energy =
vrna_E_ext_stem(rt,
(sn[j - 1] == sn[j]) ?
S1[j - 1] :
-1,
(sn[i] == sn[i + 1]) ?
S1[i + 1] :
-1,
P);
break;
}
if (fc[i + 1] + fc[j - 1] + element_energy + best_energy <= threshold)
fork_two_states_pair(i, j, ss[so[1]], state, part_energy + element_energy, 4, 5, env);
}
}
mm = P->MLclosing;
rt = rtype[type];
if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) &&
((vc->strands < 2) || ((i != se[so[0]]) && (j != ss[so[1]])))) {
element_energy = mm;
switch (dangle_model) {
case 0:
element_energy = E_MLstem(rt, -1, -1, P) + mm;
break;
default:
element_energy = E_MLstem(rt, S1[j - 1], S1[i + 1], P) + mm;
break;
}
if (sc) {
if (sc->energy_bp)
element_energy += sc->energy_bp[ij];
if (sc->f)
element_energy += sc->f(i, j, i + 1, j - 1, VRNA_DECOMP_PAIR_ML, sc->data);
}
/* multiloop decomposition */
if ((sc) && (sc->f)) {
for (k = i + turn + 2; k <= j - turn - 2; k++) {
int eee = fML[indx[k - 1] + i + 1];
if ((eee != INF) && (fM1[indx[j - 1] + k] != INF)) {
eee += fM1[indx[j - 1] + k] +
best_energy;
int aux_eee = element_energy +
sc->f(i + 1, j - 1, k - 1, k, VRNA_DECOMP_ML_ML_ML, sc->data);
if ((eee + aux_eee) <= threshold)
fork_two_states_pair(i, j, k, state, part_energy + aux_eee, 1, 3, env);
}
}
} else {
for (k = i + turn + 2; k <= j - turn - 2; k++) {
int eee = fML[indx[k - 1] + i + 1];
if ((eee != INF) && (fM1[indx[j - 1] + k] != INF)) {
/* multiloop decomposition */
if ((eee + fM1[indx[j - 1] + k] +
element_energy + best_energy) <= threshold)
fork_two_states_pair(i, j, k, state, part_energy + element_energy, 1, 3, env);
}
}
}
}
if (sn[i] == sn[j]) {
if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_HP_LOOP) &&
(!no_close)) {
element_energy = vrna_E_hp_loop(vc, i, j);
if (element_energy != INF) {
if (element_energy + best_energy <= threshold)
/* hairpin structure */
fork_state_pair(i, j, state, part_energy + element_energy, env);
}
}
if (with_gquad) {
/* now we have to find all loops where (i,j) encloses a gquad in interior-loop style */
int cnt, *p, *q, *en, tmp_en;
p = q = en = NULL;
en =
E_GQuad_IntLoop_exhaustive(i, j, &p, &q, type, S1, ggg, threshold - best_energy, indx, P);
for (cnt = 0; p[cnt] != -1; cnt++) {
if ((hc->up_int[i + 1] >= p[cnt] - i - 1) && (hc->up_int[q[cnt] + 1] >= j - q[cnt] - 1)) {
tmp_en = en[cnt];
if (sc) {
if (sc->energy_bp)
tmp_en += sc->energy_bp[ij];
if (sc->energy_up)
tmp_en += sc->energy_up[i + 1][p[cnt] - i - 1]
+ sc->energy_up[q[cnt] + 1][j - q[cnt] - 1];
}
new_state = derive_new_state(p[cnt], q[cnt], state, tmp_en + part_energy, 6);
make_pair(i, j, new_state);
/* new_state->best_energy = new + best_energy; */
push(env->Stack, new_state);
env->nopush = false;
}
}
free(en);
free(p);
free(q);
}
}
best_energy -= part_energy;
best_energy -= temp_energy;
return;
}
PRIVATE void
old_subopt_print(const char *structure,
float energy,
void *data)
{
struct old_subopt_dat *d = (struct old_subopt_dat *)data;
if (structure && d->fp) {
char *e_string = vrna_strdup_printf(" %6.2f", energy);
print_structure(d->fp, structure, e_string);
free(e_string);
}
}
PRIVATE void
old_subopt_store(const char *structure,
float energy,
void *data)
{
struct old_subopt_dat *d = (struct old_subopt_dat *)data;
/* store solution */
if (d->n_sol + 1 == d->max_sol) {
d->max_sol *= 2;
d->SolutionList = (SOLUTION *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(SOLUTION));
}
if (structure) {
d->SolutionList[d->n_sol].energy = energy;
d->SolutionList[d->n_sol++].structure = strdup(structure);
} else {
d->SolutionList[d->n_sol].energy = 0;
d->SolutionList[d->n_sol++].structure = NULL;
}
}
PRIVATE void
old_subopt_store_compressed(const char *structure,
float energy,
void *data)
{
struct old_subopt_dat *d = (struct old_subopt_dat *)data;
/* store solution */
if (d->n_sol + 1 == d->max_sol) {
d->max_sol *= 2;
d->SolutionList = (SOLUTION *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(SOLUTION));
}
if (structure) {
d->SolutionList[d->n_sol].energy = energy;
if (d->cp > 0) {
int cp = d->cp;
char *s = vrna_cut_point_remove(structure, &cp);
d->SolutionList[d->n_sol++].structure = vrna_db_pack(s);
free(s);
} else {
d->SolutionList[d->n_sol++].structure = vrna_db_pack(structure);
}
} else {
d->SolutionList[d->n_sol].energy = 0;
d->SolutionList[d->n_sol++].structure = NULL;
}
}
/*###########################################*/
/*# deprecated functions below #*/
/*###########################################*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
PUBLIC SOLUTION *
subopt(char *seq,
char *structure,
int delta,
FILE *fp)
{
return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 0, fp);
}
PUBLIC SOLUTION *
subopt_circ(char *seq,
char *structure,
int delta,
FILE *fp)
{
return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 1, fp);
}
PUBLIC SOLUTION *
subopt_par(char *seq,
char *structure,
vrna_param_t *parameters,
int delta,
int is_constrained,
int is_circular,
FILE *fp)
{
return wrap_subopt(seq, structure, parameters, delta, is_constrained, is_circular, fp);
}
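/*
 * A minimal usage sketch for the deprecated subopt() wrapper above; the
 * sequence, the empty constraint string, and the energy window delta = 500
 * (5 kcal/mol, assuming the usual 10 cal/mol units) are arbitrary
 * demonstration values, not part of this file's API.
 */
PRIVATE void
example_subopt_usage(void)
{
  int       i;
  char      sequence[]  = "GGGGAAAACCCC";
  char      structure[] = "............";
  SOLUTION  *sol        = subopt(sequence, structure, 500, NULL);
  if (sol == NULL)
    return;
  /* the solution list is terminated by a NULL structure entry */
  for (i = 0; sol[i].structure != NULL; i++) {
    printf("%s %6.2f\n", sol[i].structure, sol[i].energy);
    free(sol[i].structure);
  }
  free(sol);
}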
PRIVATE SOLUTION *
wrap_subopt(char *string,
char *structure,
vrna_param_t *parameters,
int delta,
int is_constrained,
int is_circular,
FILE *fp)
{
vrna_fold_compound_t *vc;
vrna_param_t *P;
char *seq;
#ifdef _OPENMP
/* Explicitly turn off dynamic threads */
omp_set_dynamic(0);
#endif
/* we need the parameter structure for hard constraints */
if (parameters) {
P = vrna_params_copy(parameters);
} else {
vrna_md_t md;
set_model_details(&md);
md.temperature = temperature;
P = vrna_params(&md);
}
P->model_details.circ = is_circular;
P->model_details.uniq_ML = uniq_ML = 1;
  /* what about cofold sequences here? Is it safe to call cut_point_insert() below? */
/* dirty hack to reinsert the '&' according to the global variable 'cut_point' */
seq = vrna_cut_point_insert(string, cut_point);
vc =
vrna_fold_compound(seq,
&(P->model_details),
((is_circular == 0) ? VRNA_OPTION_HYBRID : VRNA_OPTION_DEFAULT));
if (parameters) {
/* replace params if necessary */
free(vc->params);
vc->params = P;
} else {
free(P);
}
/* handle hard constraints in pseudo dot-bracket format if passed via simple interface */
if (is_constrained && structure) {
unsigned int constraint_options = 0;
constraint_options |= VRNA_CONSTRAINT_DB
| VRNA_CONSTRAINT_DB_PIPE
| VRNA_CONSTRAINT_DB_DOT
| VRNA_CONSTRAINT_DB_X
| VRNA_CONSTRAINT_DB_ANG_BRACK
| VRNA_CONSTRAINT_DB_RND_BRACK
| VRNA_CONSTRAINT_DB_INTRAMOL
| VRNA_CONSTRAINT_DB_INTERMOL;
vrna_constraints_add(vc, (const char *)structure, constraint_options);
}
if (backward_compat_compound && backward_compat)
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = vc;
backward_compat = 1;
/* cleanup */
free(seq);
return vrna_subopt(vc, delta, subopt_sorted, fp);
}
#endif
/*---------------------------------------------------------------------------*/
/* Well, that is the end!----------------------------------------------------*/
/*---------------------------------------------------------------------------*/
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resource_.h"
#include "magick/resize.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
const OrientationType orientation,ExceptionInfo *exception)
{
Image
*orient_image;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
orient_image=(Image *) NULL;
switch(orientation)
{
case UndefinedOrientation:
case TopLeftOrientation:
default:
{
orient_image=CloneImage(image,0,0,MagickTrue,exception);
break;
}
case TopRightOrientation:
{
orient_image=FlopImage(image,exception);
break;
}
case BottomRightOrientation:
{
orient_image=RotateImage(image,180.0,exception);
break;
}
case BottomLeftOrientation:
{
orient_image=FlipImage(image,exception);
break;
}
case LeftTopOrientation:
{
orient_image=TransposeImage(image,exception);
break;
}
case RightTopOrientation:
{
orient_image=RotateImage(image,90.0,exception);
break;
}
case RightBottomOrientation:
{
orient_image=TransverseImage(image,exception);
break;
}
case LeftBottomOrientation:
{
orient_image=RotateImage(image,270.0,exception);
break;
}
}
if (orient_image != (Image *) NULL)
orient_image->orientation=TopLeftOrientation;
return(orient_image);
}
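/*
  A minimal usage sketch (not part of the MagickCore API): the common
  auto-orient idiom, feeding the image's own orientation attribute back into
  AutoOrientImage() and swapping the caller's handle on success.
*/
static Image *ExampleAutoOrient(Image *image,ExceptionInfo *exception)
{
  Image
    *orient_image;

  orient_image=AutoOrientImage(image,image->orientation,exception);
  if (orient_image == (Image *) NULL)
    return(image);  /* keep the original if the reorientation failed */
  image=DestroyImage(image);
  return(orient_image);
}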
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"
CacheView
*chop_view,
*image_view;
Image
*chop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
extent;
ssize_t
y;
/*
Check chop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(chop_info != (RectangleInfo *) NULL);
if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
((chop_info->y+(ssize_t) chop_info->height) < 0) ||
(chop_info->x > (ssize_t) image->columns) ||
(chop_info->y > (ssize_t) image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
extent=(*chop_info);
if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
extent.width=(size_t) ((ssize_t) image->columns-extent.x);
if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
extent.height=(size_t) ((ssize_t) image->rows-extent.y);
if (extent.x < 0)
{
extent.width-=(size_t) (-extent.x);
extent.x=0;
}
if (extent.y < 0)
{
extent.height-=(size_t) (-extent.y);
extent.y=0;
}
chop_image=CloneImage(image,image->columns-extent.width,image->rows-
extent.height,MagickTrue,exception);
if (chop_image == (Image *) NULL)
return((Image *) NULL);
/*
Extract chop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,chop_image,extent.y,1)
#endif
for (y=0; y < (ssize_t) extent.y; y++)
{
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict chop_indexes,
*magick_restrict indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=GetPixelIndex(indexes+x);
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
/*
Extract chop image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
{
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict chop_indexes,
*magick_restrict indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
1,exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
{
*q=(*p);
if (indexes != (IndexPacket *) NULL)
{
if (chop_indexes != (IndexPacket *) NULL)
*chop_indexes++=GetPixelIndex(indexes+x);
}
q++;
}
p++;
}
if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
chop_view=DestroyCacheView(chop_view);
image_view=DestroyCacheView(image_view);
chop_image->type=image->type;
if (status == MagickFalse)
chop_image=DestroyImage(chop_image);
return(chop_image);
}
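/*
  A minimal usage sketch: remove a 100x50 region whose upper-left corner is
  at (10,20).  The geometry values are arbitrary demonstration values.
*/
static Image *ExampleChop(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    chop_info;

  chop_info.width=100;
  chop_info.height=50;
  chop_info.x=10;
  chop_info.y=20;
  return(ChopImage(image,&chop_info,exception));
}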
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
%  The format of the ConsolidateCMYKImages method is:
%
%      Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
ExceptionInfo *exception)
{
CacheView
*cmyk_view,
*image_view;
Image
*cmyk_image,
*cmyk_images;
ssize_t
i;
ssize_t
y;
/*
Consolidate separate C, M, Y, and K planes into a single image.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cmyk_images=NewImageList();
for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
{
cmyk_image=CloneImage(images,0,0,MagickTrue,exception);
if (cmyk_image == (Image *) NULL)
break;
if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
break;
(void) SetImageColorspace(cmyk_image,CMYKColorspace);
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
        SetPixelGreen(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) images->columns; x++)
{
        SetPixelBlue(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
image_view=AcquireVirtualCacheView(images,exception);
cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
for (y=0; y < (ssize_t) images->rows; y++)
{
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
break;
indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
for (x=0; x < (ssize_t) images->columns; x++)
{
SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange-
GetPixelIntensity(images,p)));
p++;
}
if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
break;
}
cmyk_view=DestroyCacheView(cmyk_view);
image_view=DestroyCacheView(image_view);
AppendImageToList(&cmyk_images,cmyk_image);
images=GetNextImageInList(images);
if (images == (Image *) NULL)
break;
}
return(cmyk_images);
}
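/*
  A minimal usage sketch: 'planes' is assumed to hold the C, M, Y, and K
  separations in that order; every group of four frames becomes one CMYK
  image in the result.
*/
static Image *ExampleConsolidateCMYK(const Image *planes,
  ExceptionInfo *exception)
{
  if ((GetImageListLength(planes) % 4) != 0)
    return((Image *) NULL);  /* incomplete plane group */
  return(ConsolidateCMYKImages(planes,exception));
}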
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"
CacheView
*crop_view,
*image_view;
Image
*crop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
bounding_box,
page;
ssize_t
y;
/*
Check crop geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
bounding_box=image->page;
if ((bounding_box.width == 0) || (bounding_box.height == 0))
{
bounding_box.width=image->columns;
bounding_box.height=image->rows;
}
page=(*geometry);
if (page.width == 0)
page.width=bounding_box.width;
if (page.height == 0)
page.height=bounding_box.height;
if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
((bounding_box.y-page.y) >= (ssize_t) page.height) ||
((page.x-bounding_box.x) > (ssize_t) image->columns) ||
((page.y-bounding_box.y) > (ssize_t) image->rows))
{
/*
Crop is not within virtual canvas, return 1 pixel transparent image.
*/
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(crop_image);
crop_image->page=bounding_box;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
if (crop_image->dispose == BackgroundDispose)
crop_image->dispose=NoneDispose;
return(crop_image);
}
if ((page.x < 0) && (bounding_box.x >= 0))
{
page.width+=page.x-bounding_box.x;
page.x=0;
}
else
{
page.width-=bounding_box.x-page.x;
page.x-=bounding_box.x;
if (page.x < 0)
page.x=0;
}
if ((page.y < 0) && (bounding_box.y >= 0))
{
page.height+=page.y-bounding_box.y;
page.y=0;
}
else
{
page.height-=bounding_box.y-page.y;
page.y-=bounding_box.y;
if (page.y < 0)
page.y=0;
}
if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
page.width=image->columns-page.x;
if ((geometry->width != 0) && (page.width > geometry->width))
page.width=geometry->width;
if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
page.height=image->rows-page.y;
if ((geometry->height != 0) && (page.height > geometry->height))
page.height=geometry->height;
bounding_box.x+=page.x;
bounding_box.y+=page.y;
if ((page.width == 0) || (page.height == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return((Image *) NULL);
}
/*
Initialize crop image attributes.
*/
crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->page.width=image->page.width;
crop_image->page.height=image->page.height;
if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t) image->page.width) ||
((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t) image->page.height))
{
crop_image->page.width=bounding_box.width;
crop_image->page.height=bounding_box.height;
}
crop_image->page.x=bounding_box.x;
crop_image->page.y=bounding_box.y;
/*
Crop image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
for (y=0; y < (ssize_t) crop_image->rows; y++)
{
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict crop_indexes;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
1,exception);
q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
(void) memcpy(q,p,(size_t) crop_image->columns*sizeof(*p));
if ((indexes != (IndexPacket *) NULL) &&
(crop_indexes != (IndexPacket *) NULL))
(void) memcpy(crop_indexes,indexes,(size_t) crop_image->columns*
sizeof(*crop_indexes));
if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
crop_view=DestroyCacheView(crop_view);
image_view=DestroyCacheView(image_view);
crop_image->type=image->type;
if (status == MagickFalse)
crop_image=DestroyImage(crop_image);
return(crop_image);
}
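/*
  A minimal usage sketch: crop a 200x200 window centered on the image.  The
  size is an arbitrary demonstration value and the image is assumed to be at
  least 200x200; virtual canvas offsets are handled by CropImage() itself.
*/
static Image *ExampleCenterCrop(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    geometry;

  geometry.width=200;
  geometry.height=200;
  geometry.x=((ssize_t) image->columns-200)/2;
  geometry.y=((ssize_t) image->rows-200)/2;
  return(CropImage(image,&geometry,exception));
}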
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImageToTiles() crops a single image into a list of tiles, which may
%  consist of a single sub-region of the image.  It applies all the normal
%  geometry flags for Crop.
%
%  The format of the CropImageToTiles method is:
%
%      Image *CropImageToTiles(const Image *image,
%        const char *crop_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t PixelRoundOffset(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(CastDoubleToLong(floor(x)));
return(CastDoubleToLong(ceil(x)));
}
MagickExport Image *CropImageToTiles(const Image *image,
const char *crop_geometry,ExceptionInfo *exception)
{
Image
*next,
*crop_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
crop_image=NewImageList();
next=NewImageList();
flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
if ((flags & AreaValue) != 0)
{
PointInfo
delta,
offset;
RectangleInfo
crop;
size_t
height,
width;
/*
Crop into NxM tiles (@ flag).
*/
width=image->columns;
height=image->rows;
if (geometry.width == 0)
geometry.width=1;
if (geometry.height == 0)
geometry.height=1;
if ((flags & AspectValue) == 0)
{
width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
else
{
width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
}
delta.x=(double) width/geometry.width;
delta.y=(double) height/geometry.height;
if (delta.x < 1.0)
delta.x=1.0;
if (delta.y < 1.0)
delta.y=1.0;
for (offset.y=0; offset.y < (double) height; )
{
if ((flags & AspectValue) == 0)
{
crop.y=PixelRoundOffset((MagickRealType) (offset.y-
(geometry.y > 0 ? 0 : geometry.y)));
offset.y+=delta.y; /* increment now to find width */
crop.height=(size_t) PixelRoundOffset((MagickRealType) (offset.y+
(geometry.y < 0 ? 0 : geometry.y)));
}
else
{
crop.y=PixelRoundOffset((MagickRealType) (offset.y-
(geometry.y > 0 ? geometry.y : 0)));
offset.y+=delta.y; /* increment now to find width */
crop.height=(size_t) PixelRoundOffset((MagickRealType) (offset.y+
(geometry.y < 0 ? geometry.y : 0)));
}
crop.height-=crop.y;
crop.y+=image->page.y;
for (offset.x=0; offset.x < (double) width; )
{
if ((flags & AspectValue) == 0)
{
crop.x=PixelRoundOffset((MagickRealType) (offset.x-
(geometry.x > 0 ? 0 : geometry.x)));
offset.x+=delta.x; /* increment now to find height */
crop.width=(size_t) PixelRoundOffset((MagickRealType) (offset.x+
(geometry.x < 0 ? 0 : geometry.x)));
}
else
{
crop.x=PixelRoundOffset((MagickRealType) (offset.x-
(geometry.x > 0 ? geometry.x : 0)));
offset.x+=delta.x; /* increment now to find height */
crop.width=(size_t) PixelRoundOffset((MagickRealType) (offset.x+
(geometry.x < 0 ? geometry.x : 0)));
}
crop.width-=crop.x;
crop.x+=image->page.x;
next=CropImage(image,&crop,exception);
if (next != (Image *) NULL)
AppendImageToList(&crop_image,next);
}
}
ClearMagickException(exception);
return(crop_image);
}
if (((geometry.width == 0) && (geometry.height == 0)) ||
((flags & XValue) != 0) || ((flags & YValue) != 0))
{
/*
Crop a single region at +X+Y.
*/
crop_image=CropImage(image,&geometry,exception);
if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
{
crop_image->page.width=geometry.width;
crop_image->page.height=geometry.height;
crop_image->page.x-=geometry.x;
crop_image->page.y-=geometry.y;
}
return(crop_image);
}
if ((image->columns > geometry.width) || (image->rows > geometry.height))
{
RectangleInfo
page;
size_t
height,
width;
ssize_t
x,
y;
/*
Crop into tiles of fixed size WxH.
*/
page=image->page;
if (page.width == 0)
page.width=image->columns;
if (page.height == 0)
page.height=image->rows;
width=geometry.width;
if (width == 0)
width=page.width;
height=geometry.height;
if (height == 0)
height=page.height;
next=NewImageList();
for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
{
for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
{
geometry.width=width;
geometry.height=height;
geometry.x=x;
geometry.y=y;
next=CropImage(image,&geometry,exception);
if (next == (Image *) NULL)
break;
AppendImageToList(&crop_image,next);
}
if (next == (Image *) NULL)
break;
}
return(crop_image);
}
return(CloneImage(image,0,0,MagickTrue,exception));
}
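/*
  A minimal usage sketch: "128x128" cuts the image into fixed 128x128 tiles;
  a geometry such as "4x4@" (the '@' area flag) would instead divide the
  image into a 4x4 grid.  The tile size is an arbitrary demonstration value.
*/
static Image *ExampleTiles(const Image *image,ExceptionInfo *exception)
{
  return(CropImageToTiles(image,"128x128",exception));
}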
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o geometry: Define the region of the image to excerpt with members
%      x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"
CacheView
*excerpt_view,
*image_view;
Image
*excerpt_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Allocate excerpt image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (excerpt_image == (Image *) NULL)
return((Image *) NULL);
/*
Excerpt each row.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
for (y=0; y < (ssize_t) excerpt_image->rows; y++)
{
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict excerpt_indexes,
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
geometry->width,1,exception);
q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) memcpy(q,p,(size_t) excerpt_image->columns*sizeof(*q));
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
if (excerpt_indexes != (IndexPacket *) NULL)
(void) memcpy(excerpt_indexes,indexes,(size_t)
excerpt_image->columns*sizeof(*excerpt_indexes));
}
if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
excerpt_view=DestroyCacheView(excerpt_view);
image_view=DestroyCacheView(image_view);
excerpt_image->type=image->type;
if (status == MagickFalse)
excerpt_image=DestroyImage(excerpt_image);
return(excerpt_image);
}
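/*
  A minimal usage sketch: grab a 64x64 window at the upper-left corner.
  Unlike CropImage(), no virtual-canvas or gravity adjustments are made;
  the geometry is taken literally.  The size is a demonstration value.
*/
static Image *ExampleExcerpt(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    geometry;

  geometry.width=64;
  geometry.height=64;
  geometry.x=0;
  geometry.y=0;
  return(ExcerptImage(image,&geometry,exception));
}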
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
Image
*extent_image;
MagickBooleanType
status;
/*
Allocate extent image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
exception);
if (extent_image == (Image *) NULL)
return((Image *) NULL);
(void) DeleteImageProfile(extent_image,"8bim"); /* delete clipping path */
status=SetImageBackgroundColor(extent_image);
if (status == MagickFalse)
{
InheritException(exception,&extent_image->exception);
extent_image=DestroyImage(extent_image);
return((Image *) NULL);
}
status=CompositeImage(extent_image,image->compose,image,-geometry->x,
-geometry->y);
if (status == MagickFalse)
{
InheritException(exception,&extent_image->exception);
extent_image=DestroyImage(extent_image);
return((Image *) NULL);
}
return(extent_image);
}
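/*
  A minimal usage sketch: pad the image to 800x600, centering the original
  by way of a negative offset (ExtentImage() composites at (-x,-y)).  The
  target size is arbitrary and the image is assumed to fit within it.
*/
static Image *ExampleExtentCenter(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    geometry;

  geometry.width=800;
  geometry.height=600;
  geometry.x=(-((800-(ssize_t) image->columns)/2));
  geometry.y=(-((600-(ssize_t) image->rows)/2));
  return(ExtentImage(image,&geometry,exception));
}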
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"
CacheView
*flip_view,
*image_view;
Image
*flip_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
flip_image=CloneImage(image,0,0,MagickTrue,exception);
if (flip_image == (Image *) NULL)
return((Image *) NULL);
/*
Flip image.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
for (y=0; y < (ssize_t) flip_image->rows; y++)
{
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict flip_indexes;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
1),flip_image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) memcpy(q,p,(size_t) image->columns*sizeof(*q));
indexes=GetCacheViewVirtualIndexQueue(image_view);
if (indexes != (const IndexPacket *) NULL)
{
flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view);
if (flip_indexes != (IndexPacket *) NULL)
(void) memcpy(flip_indexes,indexes,(size_t) image->columns*
sizeof(*flip_indexes));
}
if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flip_view=DestroyCacheView(flip_view);
image_view=DestroyCacheView(image_view);
flip_image->type=image->type;
if (page.height != 0)
page.y=(ssize_t) (page.height-flip_image->rows-page.y);
flip_image->page=page;
if (status == MagickFalse)
flip_image=DestroyImage(flip_image);
return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"
CacheView
*flop_view,
*image_view;
Image
*flop_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
flop_image=CloneImage(image,0,0,MagickTrue,exception);
if (flop_image == (Image *) NULL)
return((Image *) NULL);
/*
Flop each row.
*/
status=MagickTrue;
progress=0;
page=image->page;
image_view=AcquireVirtualCacheView(image,exception);
flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
for (y=0; y < (ssize_t) flop_image->rows; y++)
{
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict flop_indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
exception);
if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
q+=flop_image->columns;
indexes=GetCacheViewVirtualIndexQueue(image_view);
flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view);
for (x=0; x < (ssize_t) flop_image->columns; x++)
{
(*--q)=(*p++);
if ((indexes != (const IndexPacket *) NULL) &&
(flop_indexes != (IndexPacket *) NULL))
SetPixelIndex(flop_indexes+flop_image->columns-x-1,
GetPixelIndex(indexes+x));
}
if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
flop_view=DestroyCacheView(flop_view);
image_view=DestroyCacheView(image_view);
flop_image->type=image->type;
if (page.width != 0)
page.x=(ssize_t) (page.width-flop_image->columns-page.x);
flop_image->page=page;
if (status == MagickFalse)
flop_image=DestroyImage(flop_image);
return(flop_image);
}
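/*
  A minimal usage sketch: mirroring both axes -- FlipImage() followed by
  FlopImage() -- yields a 180-degree rotation.
*/
static Image *ExampleRotate180(const Image *image,ExceptionInfo *exception)
{
  Image
    *flip_image,
    *flop_image;

  flip_image=FlipImage(image,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  flop_image=FlopImage(flip_image,exception);
  flip_image=DestroyImage(flip_image);
  return(flop_image);
}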
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
CacheView
*source_view,
*destination_view;
MagickBooleanType
status;
ssize_t
y;
if (columns == 0)
return(MagickTrue);
status=MagickTrue;
source_view=AcquireVirtualCacheView(source,exception);
destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,destination,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict destination_indexes;
PixelPacket
*magick_restrict q;
/*
Transfer scanline.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source_view);
(void) memcpy(q,p,(size_t) columns*sizeof(*p));
if (indexes != (IndexPacket *) NULL)
{
destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view);
if (destination_indexes != (IndexPacket *) NULL)
(void) memcpy(destination_indexes,indexes,(size_t)
columns*sizeof(*indexes));
}
sync=SyncCacheViewAuthenticPixels(destination_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
destination_view=DestroyCacheView(destination_view);
source_view=DestroyCacheView(source_view);
return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"
Image
*roll_image;
MagickStatusType
status;
RectangleInfo
offset;
/*
Initialize roll image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
roll_image=CloneImage(image,0,0,MagickTrue,exception);
if (roll_image == (Image *) NULL)
return((Image *) NULL);
offset.x=x_offset;
offset.y=y_offset;
while (offset.x < 0)
offset.x+=(ssize_t) image->columns;
while (offset.x >= (ssize_t) image->columns)
offset.x-=(ssize_t) image->columns;
while (offset.y < 0)
offset.y+=(ssize_t) image->rows;
while (offset.y >= (ssize_t) image->rows)
offset.y-=(ssize_t) image->rows;
/*
Roll image.
*/
status=CopyImageRegion(roll_image,image,(size_t) offset.x,
(size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
offset.y,0,0,exception);
(void) SetImageProgress(image,RollImageTag,0,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
(size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
exception);
(void) SetImageProgress(image,RollImageTag,1,3);
status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,2,3);
status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
offset.y,0,0,offset.x,offset.y,exception);
(void) SetImageProgress(image,RollImageTag,3,3);
roll_image->type=image->type;
if (status == MagickFalse)
roll_image=DestroyImage(roll_image);
return(roll_image);
}
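/*
  A minimal usage sketch: roll by half the width and height; the wrap-around
  brings the four corners to the center (an FFT-style quadrant swap).
*/
static Image *ExampleRollHalf(const Image *image,ExceptionInfo *exception)
{
  return(RollImage(image,(ssize_t) (image->columns/2),
    (ssize_t) (image->rows/2),exception));
}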
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
const RectangleInfo *shave_info,ExceptionInfo *exception)
{
Image
*shave_image;
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (((2*shave_info->width) >= image->columns) ||
((2*shave_info->height) >= image->rows))
ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
SetGeometry(image,&geometry);
geometry.width-=2*shave_info->width;
geometry.height-=2*shave_info->height;
geometry.x=(ssize_t) shave_info->width+image->page.x;
geometry.y=(ssize_t) shave_info->height+image->page.y;
shave_image=CropImage(image,&geometry,exception);
if (shave_image == (Image *) NULL)
return((Image *) NULL);
shave_image->page.width-=2*shave_info->width;
shave_image->page.height-=2*shave_info->height;
shave_image->page.x-=(ssize_t) shave_info->width;
shave_image->page.y-=(ssize_t) shave_info->height;
return(shave_image);
}
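/*
  A minimal usage sketch: strip a 10-pixel frame from every edge.  Only the
  width and height members of shave_info are consulted; the border size is
  an arbitrary demonstration value.
*/
static Image *ExampleShaveBorder(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    shave_info;

  shave_info.width=10;
  shave_info.height=10;
  shave_info.x=0;
  shave_info.y=0;
  return(ShaveImage(image,&shave_info,exception));
}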
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
CacheView
*image_view,
*splice_view;
Image
*splice_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
splice_geometry;
ssize_t
columns,
y;
/*
Allocate splice image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
splice_geometry=(*geometry);
splice_image=CloneImage(image,image->columns+splice_geometry.width,
image->rows+splice_geometry.height,MagickTrue,exception);
if (splice_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse)
{
InheritException(exception,&splice_image->exception);
splice_image=DestroyImage(splice_image);
return((Image *) NULL);
}
(void) SetImageBackgroundColor(splice_image);
/*
Respect image geometry.
*/
switch (image->gravity)
{
default:
case UndefinedGravity:
case NorthWestGravity:
break;
case NorthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
break;
}
case NorthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
break;
}
case WestGravity:
{
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case StaticGravity:
case CenterGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case EastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case SouthWestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
}
/*
Splice image.
*/
status=MagickTrue;
progress=0;
columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
for (y=0; y < (ssize_t) splice_geometry.y; y++)
{
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict indexes,
*magick_restrict splice_indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
for (x=0; x < columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q++;
for ( ; x < (ssize_t) splice_image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpliceImageTag,progress,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_image->rows,1)
#endif
for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
y < (ssize_t) splice_image->rows; y++)
{
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict indexes,
*magick_restrict splice_indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
if ((y < 0) || (y >= (ssize_t)splice_image->rows))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
splice_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view);
for (x=0; x < columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q++;
for ( ; x < (ssize_t) splice_image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes));
indexes++;
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpliceImageTag,progress,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
splice_view=DestroyCacheView(splice_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
splice_image=DestroyImage(splice_image);
return(splice_image);
}
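/*
  A minimal usage sketch: insert a 20-column band of background color at
  x=100 without adding any rows (height 0).  The values are arbitrary
  demonstration values.
*/
static Image *ExampleSpliceGutter(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    geometry;

  geometry.width=20;
  geometry.height=0;
  geometry.x=100;
  geometry.y=0;
  return(SpliceImage(image,&geometry,exception));
}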
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry)
%
% A description of each parameter follows:
%
%    o image: the image.  The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
/*
  DANGER: This function destroys what it assumes to be a single image list.
  If the input image is part of a larger list, all other images in that list
  are simply 'lost', not destroyed.
  Also, if the crop generates a list of images, only the first image is
  resized.  And finally, if the crop succeeds but the resize fails, you will
  get a cropped image as well as a 'false' or 'failed' report.
  This function should probably be deprecated in favor of direct calls to
  CropImageToTiles() or ResizeImage(), as appropriate.
*/
MagickExport MagickBooleanType TransformImage(Image **image,
const char *crop_geometry,const char *image_geometry)
{
Image
*resize_image,
*transform_image;
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image **) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
transform_image=(*image);
if (crop_geometry != (const char *) NULL)
{
Image
*crop_image;
/*
Crop image to a user specified size.
*/
crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception);
if (crop_image == (Image *) NULL)
transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
else
{
transform_image=DestroyImage(transform_image);
transform_image=GetFirstImageInList(crop_image);
}
*image=transform_image;
}
if (image_geometry == (const char *) NULL)
return(MagickTrue);
/*
Scale image to a user specified size.
*/
flags=ParseRegionGeometry(transform_image,image_geometry,&geometry,
&(*image)->exception);
(void) flags;
if ((transform_image->columns == geometry.width) &&
(transform_image->rows == geometry.height))
return(MagickTrue);
resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
transform_image->filter,transform_image->blur,&(*image)->exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
transform_image=DestroyImage(transform_image);
transform_image=resize_image;
*image=transform_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImages() calls TransformImage() on each image of a sequence.
%
% The format of the TransformImages method is:
%
% MagickBooleanType TransformImages(Image **images,
% const char *crop_geometry,const char *image_geometry)
%
% A description of each parameter follows:
%
% o images: the image sequence. The transformed images are returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
*/
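/*
Illustrative usage (a sketch, not from the original source): apply the same
crop/resize to every frame of a sequence. Because the per-image results are
accumulated with &=, the return value is MagickTrue only if every frame
succeeded:

frames = ...;  assumed: a multi-image list
if (TransformImages(&frames,"200x200+0+0","100x100") == MagickFalse)
  ...at least one frame failed to transform...
*/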
MagickExport MagickBooleanType TransformImages(Image **images,
const char *crop_geometry,const char *image_geometry)
{
Image
*image,
**image_list,
*transform_images;
MagickStatusType
status;
ssize_t
i;
assert(images != (Image **) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
image_list=ImageListToArray(*images,&(*images)->exception);
if (image_list == (Image **) NULL)
return(MagickFalse);
status=MagickTrue;
transform_images=NewImageList();
for (i=0; image_list[i] != (Image *) NULL; i++)
{
image=image_list[i];
status&=TransformImage(&image,crop_geometry,image_geometry);
AppendImageToList(&transform_images,image);
}
*images=transform_images;
image_list=(Image **) RelinquishMagickMemory(image_list);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
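/*
Reading the loop below (a summary of the code, not a statement from the
original documentation): each source row r is copied, unreversed, into
destination column r, so the net pixel mapping is source (x,y) ->
destination (y,x) and a WxH image becomes an HxW image.
*/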
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
CacheView
*image_view,
*transpose_view;
Image
*transpose_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transpose_image == (Image *) NULL)
return((Image *) NULL);
/*
Transpose image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transpose_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict transpose_indexes,
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
0,1,transpose_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
(void) memcpy(q,p,(size_t) image->columns*sizeof(*q));
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view);
if (transpose_indexes != (IndexPacket *) NULL)
(void) memcpy(transpose_indexes,indexes,(size_t)
image->columns*sizeof(*transpose_indexes));
}
if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransposeImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transpose_view=DestroyCacheView(transpose_view);
image_view=DestroyCacheView(image_view);
transpose_image->type=image->type;
page=transpose_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
transpose_image->page=page;
if (status == MagickFalse)
transpose_image=DestroyImage(transpose_image);
return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
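/*
Reading the loop below (a summary of the code, not a statement from the
original documentation): each source row y is copied, reversed, into
destination column rows-1-y, so the net pixel mapping is source (x,y) ->
destination (rows-1-y, columns-1-x); that is, a transpose followed by a
180-degree rotation, again turning a WxH image into an HxW image.
*/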
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
CacheView
*image_view,
*transverse_view;
Image
*transverse_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transverse_image == (Image *) NULL)
return((Image *) NULL);
/*
Transverse image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transverse_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict transverse_indexes,
*magick_restrict indexes;
ssize_t
x;
PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-
1),0,1,transverse_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
q+=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
*--q=(*p++);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
if (indexes != (IndexPacket *) NULL)
{
transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view);
if (transverse_indexes != (IndexPacket *) NULL)
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(transverse_indexes+image->columns-x-1,
GetPixelIndex(indexes+x));
}
sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transverse_view=DestroyCacheView(transverse_view);
image_view=DestroyCacheView(image_view);
transverse_image->type=image->type;
page=transverse_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
transverse_image->page=page;
if (status == MagickFalse)
transverse_image=DestroyImage(transverse_image);
return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
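/*
Illustrative usage (a sketch, not from the original source):

exception = AcquireExceptionInfo();
trimmed = TrimImage(image,exception);
if (trimmed != (Image *) NULL)
  ...trimmed->page records where the content sat in the original canvas...

Note the special case handled below: if the bounding box is empty (the
image is entirely background), a 1x1 transparent image with a -1 page
offset is returned rather than a NULL image.
*/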
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
RectangleInfo
geometry;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
geometry=GetImageBoundingBox(image,exception);
if ((geometry.width == 0) || (geometry.height == 0))
{
Image
*crop_image;
crop_image=CloneImage(image,1,1,MagickTrue,exception);
if (crop_image == (Image *) NULL)
return((Image *) NULL);
crop_image->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(crop_image);
crop_image->page=image->page;
crop_image->page.x=(-1);
crop_image->page.y=(-1);
return(crop_image);
}
geometry.x+=image->page.x;
geometry.y+=image->page.y;
return(CropImage(image,&geometry,exception));
}
|
irbuilder_nested_parallel_for.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-enable-irbuilder -x c++ -emit-llvm %s -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefixes=CHECK %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -verify %s -emit-llvm -o - | FileCheck --check-prefixes=CHECK-DEBUG %s
// expected-no-diagnostics
// TODO: Teach the update script to check new functions too.
#ifndef HEADER
#define HEADER
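// The functions below (parallel_for_0/1/2) exercise increasingly deep nesting
// of '#pragma omp parallel' and '#pragma omp for'; the autogenerated CHECK
// lines pin down the outlined __kmpc_fork_call wrappers and the
// __kmpc_for_static_init_4u/__kmpc_for_static_fini worksharing skeleton.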
// CHECK-LABEL: @_Z14parallel_for_0v(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @_Z14parallel_for_0v..omp_par to void (i32*, i32*, ...)*))
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK: omp.par.outlined.exit:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_0v(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]), !dbg [[DBG13:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @_Z14parallel_for_0v..omp_par to void (i32*, i32*, ...)*)), !dbg [[DBG14:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: ret void, !dbg [[DBG18:![0-9]+]]
//
void parallel_for_0(void) {
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i) {
}
}
}
// CHECK-LABEL: @_Z14parallel_for_1Pfid(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[STRUCTARG17:%.*]] = alloca { i32*, double*, float** }, align 8
// CHECK-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: [[GEP_A_ADDR18:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 0
// CHECK-NEXT: store i32* [[A_ADDR]], i32** [[GEP_A_ADDR18]], align 8
// CHECK-NEXT: [[GEP_B_ADDR19:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 1
// CHECK-NEXT: store double* [[B_ADDR]], double** [[GEP_B_ADDR19]], align 8
// CHECK-NEXT: [[GEP_R_ADDR20:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 2
// CHECK-NEXT: store float** [[R_ADDR]], float*** [[GEP_R_ADDR20]], align 8
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { i32*, double*, float** }*)* @_Z14parallel_for_1Pfid..omp_par.4 to void (i32*, i32*, ...)*), { i32*, double*, float** }* [[STRUCTARG17]])
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT16:%.*]]
// CHECK: omp.par.outlined.exit16:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_1Pfid(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[STRUCTARG17:%.*]] = alloca { i32*, double*, float** }, align 8
// CHECK-DEBUG-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-DEBUG-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-DEBUG-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata float** [[R_ADDR]], metadata [[META72:![0-9]+]], metadata !DIExpression()), !dbg [[DBG73:![0-9]+]]
// CHECK-DEBUG-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[A_ADDR]], metadata [[META74:![0-9]+]], metadata !DIExpression()), !dbg [[DBG75:![0-9]+]]
// CHECK-DEBUG-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata double* [[B_ADDR]], metadata [[META76:![0-9]+]], metadata !DIExpression()), !dbg [[DBG77:![0-9]+]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6:[0-9]+]]), !dbg [[DBG78:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: [[GEP_A_ADDR18:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 0
// CHECK-DEBUG-NEXT: store i32* [[A_ADDR]], i32** [[GEP_A_ADDR18]], align 8
// CHECK-DEBUG-NEXT: [[GEP_B_ADDR19:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 1
// CHECK-DEBUG-NEXT: store double* [[B_ADDR]], double** [[GEP_B_ADDR19]], align 8
// CHECK-DEBUG-NEXT: [[GEP_R_ADDR20:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 2
// CHECK-DEBUG-NEXT: store float** [[R_ADDR]], float*** [[GEP_R_ADDR20]], align 8
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB6]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { i32*, double*, float** }*)* @_Z14parallel_for_1Pfid..omp_par.4 to void (i32*, i32*, ...)*), { i32*, double*, float** }* [[STRUCTARG17]]), !dbg [[DBG79:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT16:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit16:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: ret void, !dbg [[DBG81:![0-9]+]]
//
void parallel_for_1(float *r, int a, double b) {
#pragma omp parallel
{
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i) {
*r = a + b;
}
}
}
}
// CHECK-LABEL: @_Z14parallel_for_2Pfid(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[STRUCTARG:%.*]] = alloca { i32*, double*, float** }, align 8
// CHECK-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: [[I185:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[AGG_CAPTURED186:%.*]] = alloca [[STRUCT_ANON_17:%.*]], align 8
// CHECK-NEXT: [[AGG_CAPTURED187:%.*]] = alloca [[STRUCT_ANON_18:%.*]], align 4
// CHECK-NEXT: [[DOTCOUNT_ADDR188:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_LASTITER203:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_LOWERBOUND204:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_UPPERBOUND205:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_STRIDE206:%.*]] = alloca i32, align 4
// CHECK-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK: omp_parallel:
// CHECK-NEXT: [[GEP_A_ADDR:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG]], i32 0, i32 0
// CHECK-NEXT: store i32* [[A_ADDR]], i32** [[GEP_A_ADDR]], align 8
// CHECK-NEXT: [[GEP_B_ADDR:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG]], i32 0, i32 1
// CHECK-NEXT: store double* [[B_ADDR]], double** [[GEP_B_ADDR]], align 8
// CHECK-NEXT: [[GEP_R_ADDR:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG]], i32 0, i32 2
// CHECK-NEXT: store float** [[R_ADDR]], float*** [[GEP_R_ADDR]], align 8
// CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { i32*, double*, float** }*)* @_Z14parallel_for_2Pfid..omp_par.23 to void (i32*, i32*, ...)*), { i32*, double*, float** }* [[STRUCTARG]])
// CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT184:%.*]]
// CHECK: omp.par.outlined.exit184:
// CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK: omp.par.exit.split:
// CHECK-NEXT: store i32 0, i32* [[I185]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_17]], %struct.anon.17* [[AGG_CAPTURED186]], i32 0, i32 0
// CHECK-NEXT: store i32* [[I185]], i32** [[TMP0]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_18]], %struct.anon.18* [[AGG_CAPTURED187]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[I185]], align 4
// CHECK-NEXT: store i32 [[TMP2]], i32* [[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt.19(i32* [[DOTCOUNT_ADDR188]], %struct.anon.17* [[AGG_CAPTURED186]])
// CHECK-NEXT: [[DOTCOUNT189:%.*]] = load i32, i32* [[DOTCOUNT_ADDR188]], align 4
// CHECK-NEXT: br label [[OMP_LOOP_PREHEADER190:%.*]]
// CHECK: omp_loop.preheader190:
// CHECK-NEXT: store i32 0, i32* [[P_LOWERBOUND204]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[DOTCOUNT189]], 1
// CHECK-NEXT: store i32 [[TMP3]], i32* [[P_UPPERBOUND205]], align 4
// CHECK-NEXT: store i32 1, i32* [[P_STRIDE206]], align 4
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM207:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM207]], i32 34, i32* [[P_LASTITER203]], i32* [[P_LOWERBOUND204]], i32* [[P_UPPERBOUND205]], i32* [[P_STRIDE206]], i32 1, i32 0)
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[P_LOWERBOUND204]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[P_UPPERBOUND205]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], [[TMP4]]
// CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 1
// CHECK-NEXT: br label [[OMP_LOOP_HEADER191:%.*]]
// CHECK: omp_loop.header191:
// CHECK-NEXT: [[OMP_LOOP_IV197:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER190]] ], [ [[OMP_LOOP_NEXT199:%.*]], [[OMP_LOOP_INC194:%.*]] ]
// CHECK-NEXT: br label [[OMP_LOOP_COND192:%.*]]
// CHECK: omp_loop.cond192:
// CHECK-NEXT: [[OMP_LOOP_CMP198:%.*]] = icmp ult i32 [[OMP_LOOP_IV197]], [[TMP7]]
// CHECK-NEXT: br i1 [[OMP_LOOP_CMP198]], label [[OMP_LOOP_BODY193:%.*]], label [[OMP_LOOP_EXIT195:%.*]]
// CHECK: omp_loop.body193:
// CHECK-NEXT: [[TMP8:%.*]] = add i32 [[OMP_LOOP_IV197]], [[TMP4]]
// CHECK-NEXT: call void @__captured_stmt.20(i32* [[I185]], i32 [[TMP8]], %struct.anon.18* [[AGG_CAPTURED187]])
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[CONV200:%.*]] = sitofp i32 [[TMP9]] to double
// CHECK-NEXT: [[TMP10:%.*]] = load double, double* [[B_ADDR]], align 8
// CHECK-NEXT: [[ADD201:%.*]] = fadd double [[CONV200]], [[TMP10]]
// CHECK-NEXT: [[CONV202:%.*]] = fptrunc double [[ADD201]] to float
// CHECK-NEXT: [[TMP11:%.*]] = load float*, float** [[R_ADDR]], align 8
// CHECK-NEXT: store float [[CONV202]], float* [[TMP11]], align 4
// CHECK-NEXT: br label [[OMP_LOOP_INC194]]
// CHECK: omp_loop.inc194:
// CHECK-NEXT: [[OMP_LOOP_NEXT199]] = add nuw i32 [[OMP_LOOP_IV197]], 1
// CHECK-NEXT: br label [[OMP_LOOP_HEADER191]]
// CHECK: omp_loop.exit195:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM207]])
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM208:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM208]])
// CHECK-NEXT: br label [[OMP_LOOP_AFTER196:%.*]]
// CHECK: omp_loop.after196:
// CHECK-NEXT: ret void
//
// CHECK-DEBUG-LABEL: @_Z14parallel_for_2Pfid(
// CHECK-DEBUG-NEXT: entry:
// CHECK-DEBUG-NEXT: [[STRUCTARG:%.*]] = alloca { i32*, double*, float** }, align 8
// CHECK-DEBUG-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8
// CHECK-DEBUG-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[B_ADDR:%.*]] = alloca double, align 8
// CHECK-DEBUG-NEXT: [[I185:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[AGG_CAPTURED186:%.*]] = alloca [[STRUCT_ANON_17:%.*]], align 8
// CHECK-DEBUG-NEXT: [[AGG_CAPTURED187:%.*]] = alloca [[STRUCT_ANON_18:%.*]], align 4
// CHECK-DEBUG-NEXT: [[DOTCOUNT_ADDR188:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_LASTITER203:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_LOWERBOUND204:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_UPPERBOUND205:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: [[P_STRIDE206:%.*]] = alloca i32, align 4
// CHECK-DEBUG-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata float** [[R_ADDR]], metadata [[META133:![0-9]+]], metadata !DIExpression()), !dbg [[DBG134:![0-9]+]]
// CHECK-DEBUG-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[A_ADDR]], metadata [[META135:![0-9]+]], metadata !DIExpression()), !dbg [[DBG136:![0-9]+]]
// CHECK-DEBUG-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata double* [[B_ADDR]], metadata [[META137:![0-9]+]], metadata !DIExpression()), !dbg [[DBG138:![0-9]+]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB13:[0-9]+]]), !dbg [[DBG139:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]]
// CHECK-DEBUG: omp_parallel:
// CHECK-DEBUG-NEXT: [[GEP_A_ADDR:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG]], i32 0, i32 0
// CHECK-DEBUG-NEXT: store i32* [[A_ADDR]], i32** [[GEP_A_ADDR]], align 8
// CHECK-DEBUG-NEXT: [[GEP_B_ADDR:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG]], i32 0, i32 1
// CHECK-DEBUG-NEXT: store double* [[B_ADDR]], double** [[GEP_B_ADDR]], align 8
// CHECK-DEBUG-NEXT: [[GEP_R_ADDR:%.*]] = getelementptr { i32*, double*, float** }, { i32*, double*, float** }* [[STRUCTARG]], i32 0, i32 2
// CHECK-DEBUG-NEXT: store float** [[R_ADDR]], float*** [[GEP_R_ADDR]], align 8
// CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB13]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { i32*, double*, float** }*)* @_Z14parallel_for_2Pfid..omp_par.23 to void (i32*, i32*, ...)*), { i32*, double*, float** }* [[STRUCTARG]]), !dbg [[DBG140:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT184:%.*]]
// CHECK-DEBUG: omp.par.outlined.exit184:
// CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]]
// CHECK-DEBUG: omp.par.exit.split:
// CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[I185]], metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG147:![0-9]+]]
// CHECK-DEBUG-NEXT: store i32 0, i32* [[I185]], align 4, !dbg [[DBG147]]
// CHECK-DEBUG-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_17]], %struct.anon.17* [[AGG_CAPTURED186]], i32 0, i32 0, !dbg [[DBG148:![0-9]+]]
// CHECK-DEBUG-NEXT: store i32* [[I185]], i32** [[TMP0]], align 8, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_18]], %struct.anon.18* [[AGG_CAPTURED187]], i32 0, i32 0, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP2:%.*]] = load i32, i32* [[I185]], align 4, !dbg [[DBG149:![0-9]+]]
// CHECK-DEBUG-NEXT: store i32 [[TMP2]], i32* [[TMP1]], align 4, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: call void @__captured_stmt.19(i32* [[DOTCOUNT_ADDR188]], %struct.anon.17* [[AGG_CAPTURED186]]), !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: [[DOTCOUNT189:%.*]] = load i32, i32* [[DOTCOUNT_ADDR188]], align 4, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_PREHEADER190:%.*]], !dbg [[DBG148]]
// CHECK-DEBUG: omp_loop.preheader190:
// CHECK-DEBUG-NEXT: store i32 0, i32* [[P_LOWERBOUND204]], align 4, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP3:%.*]] = sub i32 [[DOTCOUNT189]], 1, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: store i32 [[TMP3]], i32* [[P_UPPERBOUND205]], align 4, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: store i32 1, i32* [[P_STRIDE206]], align 4, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM207:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB42:[0-9]+]]), !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB42]], i32 [[OMP_GLOBAL_THREAD_NUM207]], i32 34, i32* [[P_LASTITER203]], i32* [[P_LOWERBOUND204]], i32* [[P_UPPERBOUND205]], i32* [[P_STRIDE206]], i32 1, i32 0), !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP4:%.*]] = load i32, i32* [[P_LOWERBOUND204]], align 4, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP5:%.*]] = load i32, i32* [[P_UPPERBOUND205]], align 4, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], [[TMP4]], !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 1, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_HEADER191:%.*]], !dbg [[DBG148]]
// CHECK-DEBUG: omp_loop.header191:
// CHECK-DEBUG-NEXT: [[OMP_LOOP_IV197:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER190]] ], [ [[OMP_LOOP_NEXT199:%.*]], [[OMP_LOOP_INC194:%.*]] ], !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_COND192:%.*]], !dbg [[DBG148]]
// CHECK-DEBUG: omp_loop.cond192:
// CHECK-DEBUG-NEXT: [[OMP_LOOP_CMP198:%.*]] = icmp ult i32 [[OMP_LOOP_IV197]], [[TMP7]], !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: br i1 [[OMP_LOOP_CMP198]], label [[OMP_LOOP_BODY193:%.*]], label [[OMP_LOOP_EXIT195:%.*]], !dbg [[DBG148]]
// CHECK-DEBUG: omp_loop.body193:
// CHECK-DEBUG-NEXT: [[TMP8:%.*]] = add i32 [[OMP_LOOP_IV197]], [[TMP4]], !dbg [[DBG150:![0-9]+]]
// CHECK-DEBUG-NEXT: call void @__captured_stmt.20(i32* [[I185]], i32 [[TMP8]], %struct.anon.18* [[AGG_CAPTURED187]]), !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !dbg [[DBG151:![0-9]+]]
// CHECK-DEBUG-NEXT: [[CONV200:%.*]] = sitofp i32 [[TMP9]] to double, !dbg [[DBG151]]
// CHECK-DEBUG-NEXT: [[TMP10:%.*]] = load double, double* [[B_ADDR]], align 8, !dbg [[DBG150]]
// CHECK-DEBUG-NEXT: [[ADD201:%.*]] = fadd double [[CONV200]], [[TMP10]], !dbg [[DBG152:![0-9]+]]
// CHECK-DEBUG-NEXT: [[CONV202:%.*]] = fptrunc double [[ADD201]] to float, !dbg [[DBG151]]
// CHECK-DEBUG-NEXT: [[TMP11:%.*]] = load float*, float** [[R_ADDR]], align 8, !dbg [[DBG153:![0-9]+]]
// CHECK-DEBUG-NEXT: store float [[CONV202]], float* [[TMP11]], align 4, !dbg [[DBG154:![0-9]+]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_INC194]], !dbg [[DBG148]]
// CHECK-DEBUG: omp_loop.inc194:
// CHECK-DEBUG-NEXT: [[OMP_LOOP_NEXT199]] = add nuw i32 [[OMP_LOOP_IV197]], 1, !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_HEADER191]], !dbg [[DBG148]]
// CHECK-DEBUG: omp_loop.exit195:
// CHECK-DEBUG-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB42]], i32 [[OMP_GLOBAL_THREAD_NUM207]]), !dbg [[DBG148]]
// CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM208:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB42]]), !dbg [[DBG150]]
// CHECK-DEBUG-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB43:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM208]]), !dbg [[DBG150]]
// CHECK-DEBUG-NEXT: br label [[OMP_LOOP_AFTER196:%.*]], !dbg [[DBG148]]
// CHECK-DEBUG: omp_loop.after196:
// CHECK-DEBUG-NEXT: ret void, !dbg [[DBG155:![0-9]+]]
//
void parallel_for_2(float *r, int a, double b) {
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
}
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
#pragma omp parallel
{
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
}
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
}
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
}
#pragma omp for
for (int i = 0; i < 100; ++i)
*r = a + b;
}
#endif
|
team.c | /* Copyright (C) 2005-2020 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU Offloading and Multi Processing Library
(libgomp).
Libgomp is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file handles the maintenance of threads in response to team
creation and termination. */
#include "libgomp.h"
#include "pool.h"
#include <stdlib.h>
#include <string.h>
#ifdef LIBGOMP_USE_PTHREADS
pthread_attr_t gomp_thread_attr;
/* This key is for the thread destructor. */
pthread_key_t gomp_thread_destructor;
/* This is the libgomp per-thread data structure. */
#if defined HAVE_TLS || defined USE_EMUTLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif
/* This structure is used to communicate across pthread_create. */
struct gomp_thread_start_data
{
void (*fn) (void *);
void *fn_data;
struct gomp_team_state ts;
struct gomp_task *task;
struct gomp_thread_pool *thread_pool;
unsigned int place;
bool nested;
pthread_t handle;
};
/* This function is a pthread_create entry point. This contains the idle
loop in which a thread waits to be called up to become part of a team. */
static void *
gomp_thread_start (void *xdata)
{
struct gomp_thread_start_data *data = xdata;
struct gomp_thread *thr;
struct gomp_thread_pool *pool;
void (*local_fn) (void *);
void *local_data;
#if defined HAVE_TLS || defined USE_EMUTLS
thr = &gomp_tls_data;
#else
struct gomp_thread local_thr;
thr = &local_thr;
pthread_setspecific (gomp_tls_key, thr);
#endif
gomp_sem_init (&thr->release, 0);
/* Extract what we need from data. */
local_fn = data->fn;
local_data = data->fn_data;
thr->thread_pool = data->thread_pool;
thr->ts = data->ts;
thr->task = data->task;
thr->place = data->place;
#ifdef GOMP_NEEDS_THREAD_HANDLE
thr->handle = data->handle;
#endif
thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;
/* Make thread pool local. */
pool = thr->thread_pool;
if (data->nested)
{
struct gomp_team *team = thr->ts.team;
struct gomp_task *task = thr->task;
gomp_barrier_wait (&team->barrier);
local_fn (local_data);
gomp_team_barrier_wait_final (&team->barrier);
gomp_finish_task (task);
gomp_barrier_wait_last (&team->barrier);
}
else
{
pool->threads[thr->ts.team_id] = thr;
gomp_simple_barrier_wait (&pool->threads_dock);
do
{
struct gomp_team *team = thr->ts.team;
struct gomp_task *task = thr->task;
local_fn (local_data);
gomp_team_barrier_wait_final (&team->barrier);
gomp_finish_task (task);
gomp_simple_barrier_wait (&pool->threads_dock);
local_fn = thr->fn;
local_data = thr->data;
thr->fn = NULL;
}
while (local_fn);
}
gomp_sem_destroy (&thr->release);
pthread_detach (pthread_self ());
thr->thread_pool = NULL;
thr->task = NULL;
return NULL;
}
#endif
static inline struct gomp_team *
get_last_team (unsigned nthreads)
{
struct gomp_thread *thr = gomp_thread ();
if (thr->ts.team == NULL)
{
struct gomp_thread_pool *pool = gomp_get_thread_pool (thr, nthreads);
struct gomp_team *last_team = pool->last_team;
if (last_team != NULL && last_team->nthreads == nthreads)
{
pool->last_team = NULL;
return last_team;
}
}
return NULL;
}
/* Create a new team data structure. */
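/* Layout note (as implied by the allocation below): a team is allocated as
   one block -- the struct gomp_team itself (which embeds the initial
   work_shares array), then nthreads implicit_task entries, then nthreads
   ordered_release pointers; ordered_release is later pointed at
   &team->implicit_task[nthreads]. */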
struct gomp_team *
gomp_new_team (unsigned nthreads)
{
struct gomp_team *team;
int i;
team = get_last_team (nthreads);
if (team == NULL)
{
size_t extra = sizeof (team->ordered_release[0])
+ sizeof (team->implicit_task[0]);
team = team_malloc (sizeof (*team) + nthreads * extra);
#ifndef HAVE_SYNC_BUILTINS
gomp_mutex_init (&team->work_share_list_free_lock);
#endif
gomp_barrier_init (&team->barrier, nthreads);
gomp_mutex_init (&team->task_lock);
team->nthreads = nthreads;
}
team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
team->single_count = 0;
#endif
team->work_shares_to_free = &team->work_shares[0];
gomp_init_work_share (&team->work_shares[0], 0, nthreads);
team->work_shares[0].next_alloc = NULL;
team->work_share_list_free = NULL;
team->work_share_list_alloc = &team->work_shares[1];
for (i = 1; i < 7; i++)
team->work_shares[i].next_free = &team->work_shares[i + 1];
team->work_shares[i].next_free = NULL;
gomp_sem_init (&team->master_release, 0);
team->ordered_release = (void *) &team->implicit_task[nthreads];
team->ordered_release[0] = &team->master_release;
priority_queue_init (&team->task_queue);
team->task_count = 0;
team->task_queued_count = 0;
team->task_running_count = 0;
team->work_share_cancelled = 0;
team->team_cancelled = 0;
return team;
}
/* Free a team data structure. */
static void
free_team (struct gomp_team *team)
{
#ifndef HAVE_SYNC_BUILTINS
gomp_mutex_destroy (&team->work_share_list_free_lock);
#endif
gomp_barrier_destroy (&team->barrier);
gomp_mutex_destroy (&team->task_lock);
priority_queue_free (&team->task_queue);
team_free (team);
}
static void
gomp_free_pool_helper (void *thread_pool)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_thread_pool *pool
= (struct gomp_thread_pool *) thread_pool;
gomp_simple_barrier_wait_last (&pool->threads_dock);
gomp_sem_destroy (&thr->release);
thr->thread_pool = NULL;
thr->task = NULL;
#ifdef LIBGOMP_USE_PTHREADS
pthread_detach (pthread_self ());
pthread_exit (NULL);
#elif defined(__nvptx__)
asm ("exit;");
#elif defined(__AMDGCN__)
asm ("s_dcache_wb\n\t"
"s_endpgm");
#else
#error gomp_free_pool_helper must terminate the thread
#endif
}
/* Free a thread pool and release its threads. */
void
gomp_free_thread (void *arg __attribute__((unused)))
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_thread_pool *pool = thr->thread_pool;
if (pool)
{
if (pool->threads_used > 0)
{
int i;
for (i = 1; i < pool->threads_used; i++)
{
struct gomp_thread *nthr = pool->threads[i];
nthr->fn = gomp_free_pool_helper;
nthr->data = pool;
}
/* This barrier undocks threads docked on pool->threads_dock. */
gomp_simple_barrier_wait (&pool->threads_dock);
/* And this waits until all threads have called gomp_simple_barrier_wait_last
in gomp_free_pool_helper. */
gomp_simple_barrier_wait (&pool->threads_dock);
/* Now it is safe to destroy the barrier and free the pool. */
gomp_simple_barrier_destroy (&pool->threads_dock);
#ifdef HAVE_SYNC_BUILTINS
__sync_fetch_and_add (&gomp_managed_threads,
1L - pool->threads_used);
#else
gomp_mutex_lock (&gomp_managed_threads_lock);
gomp_managed_threads -= pool->threads_used - 1L;
gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
}
if (pool->last_team)
free_team (pool->last_team);
#ifndef __nvptx__
team_free (pool->threads);
team_free (pool);
#endif
thr->thread_pool = NULL;
}
if (thr->ts.level == 0 && __builtin_expect (thr->ts.team != NULL, 0))
gomp_team_end ();
if (thr->task != NULL)
{
struct gomp_task *task = thr->task;
gomp_end_task ();
free (task);
}
}
/* Launch a team. */
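/* Rough shape of gomp_team_start below (a summary, not authoritative):
   1) install the new team state in the master thread,
   2) for non-nested teams, reuse idle threads docked on pool->threads_dock,
      re-sorting them by place when affinity is in use,
   3) pthread_create any remaining threads with per-thread start data, and
   4) release everyone through the dock/team barrier at do_release. */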
#ifdef LIBGOMP_USE_PTHREADS
void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
unsigned flags, struct gomp_team *team,
struct gomp_taskgroup *taskgroup)
{
struct gomp_thread_start_data *start_data;
struct gomp_thread *thr, *nthr;
struct gomp_task *task;
struct gomp_task_icv *icv;
bool nested;
struct gomp_thread_pool *pool;
unsigned i, n, old_threads_used = 0;
pthread_attr_t thread_attr, *attr;
unsigned long nthreads_var;
char bind, bind_var;
unsigned int s = 0, rest = 0, p = 0, k = 0;
unsigned int affinity_count = 0;
struct gomp_thread **affinity_thr = NULL;
bool force_display = false;
thr = gomp_thread ();
nested = thr->ts.level;
pool = thr->thread_pool;
task = thr->task;
icv = task ? &task->icv : &gomp_global_icv;
if (__builtin_expect (gomp_places_list != NULL, 0) && thr->place == 0)
{
gomp_init_affinity ();
if (__builtin_expect (gomp_display_affinity_var, 0) && nthreads == 1)
gomp_display_affinity_thread (gomp_thread_self (), &thr->ts,
thr->place);
}
/* Always save the previous state, even if this isn't a nested team.
In particular, we should save any work share state from an outer
orphaned work share construct. */
team->prev_ts = thr->ts;
thr->ts.team = team;
thr->ts.team_id = 0;
++thr->ts.level;
if (nthreads > 1)
++thr->ts.active_level;
thr->ts.work_share = &team->work_shares[0];
thr->ts.last_work_share = NULL;
#ifdef HAVE_SYNC_BUILTINS
thr->ts.single_count = 0;
#endif
thr->ts.static_trip = 0;
thr->task = &team->implicit_task[0];
#ifdef GOMP_NEEDS_THREAD_HANDLE
thr->handle = pthread_self ();
#endif
nthreads_var = icv->nthreads_var;
if (__builtin_expect (gomp_nthreads_var_list != NULL, 0)
&& thr->ts.level < gomp_nthreads_var_list_len)
nthreads_var = gomp_nthreads_var_list[thr->ts.level];
bind_var = icv->bind_var;
if (bind_var != omp_proc_bind_false && (flags & 7) != omp_proc_bind_false)
bind_var = flags & 7;
bind = bind_var;
if (__builtin_expect (gomp_bind_var_list != NULL, 0)
&& thr->ts.level < gomp_bind_var_list_len)
bind_var = gomp_bind_var_list[thr->ts.level];
gomp_init_task (thr->task, task, icv);
thr->task->taskgroup = taskgroup;
team->implicit_task[0].icv.nthreads_var = nthreads_var;
team->implicit_task[0].icv.bind_var = bind_var;
if (nthreads == 1)
return;
i = 1;
if (__builtin_expect (gomp_places_list != NULL, 0))
{
/* Depending on the chosen proc_bind model, set the subpartition
for the master thread and initialize the helper variables
P and optionally S, K and/or REST used by the later place
computation for each additional thread. */
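/* Worked example (hypothetical numbers): with omp_proc_bind_close,
nthreads = 10 threads over a partition of 4 places gives
s = 10/4 = 2 and rest = 10%4 = 2, i.e. two threads in each place,
with the final two threads doubled up one by one into already
occupied places. */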
p = thr->place - 1;
switch (bind)
{
case omp_proc_bind_true:
case omp_proc_bind_close:
if (nthreads > thr->ts.place_partition_len)
{
/* T > P. S threads will be placed in each place,
and the final REST threads will be placed one by one
into the already occupied places. */
s = nthreads / thr->ts.place_partition_len;
rest = nthreads % thr->ts.place_partition_len;
}
else
s = 1;
k = 1;
break;
case omp_proc_bind_master:
/* Each thread will be bound to the master's place. */
break;
case omp_proc_bind_spread:
if (nthreads <= thr->ts.place_partition_len)
{
/* T <= P. Each subpartition will have between s
and s+1 places (subpartitions starting at or
after rest will have s places, earlier ones s+1
places); each thread will be bound to the first
place in its subpartition (except for the master
thread, which can be bound to another place in
its subpartition). */
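/* Worked example (hypothetical numbers): T = 3 threads spread over
P = 8 places gives s = 8/3 = 2 and rest = 8%3 = 2, so the first two
subpartitions get s+1 = 3 places each and the last one gets s = 2;
each thread is pinned to the first place of its subpartition. */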
s = thr->ts.place_partition_len / nthreads;
rest = thr->ts.place_partition_len % nthreads;
rest = (s + 1) * rest + thr->ts.place_partition_off;
if (p < rest)
{
p -= (p - thr->ts.place_partition_off) % (s + 1);
thr->ts.place_partition_len = s + 1;
}
else
{
p -= (p - rest) % s;
thr->ts.place_partition_len = s;
}
thr->ts.place_partition_off = p;
}
else
{
/* T > P. Each subpartition will have just a single
place and we'll place between s and s+1
threads into each subpartition. */
s = nthreads / thr->ts.place_partition_len;
rest = nthreads % thr->ts.place_partition_len;
thr->ts.place_partition_off = p;
thr->ts.place_partition_len = 1;
k = 1;
}
break;
}
}
else
bind = omp_proc_bind_false;
/* We only allow the reuse of idle threads for non-nested PARALLEL
regions. This appears to be implied by the semantics of
threadprivate variables, but perhaps that's reading too much into
things. Certainly it does prevent any locking problems, since
only the initial program thread will modify gomp_threads. */
if (!nested)
{
old_threads_used = pool->threads_used;
if (nthreads <= old_threads_used)
n = nthreads;
else if (old_threads_used == 0)
{
n = 0;
gomp_simple_barrier_init (&pool->threads_dock, nthreads);
}
else
{
n = old_threads_used;
/* Increase the barrier threshold to make sure all new
threads arrive before the team is released. */
gomp_simple_barrier_reinit (&pool->threads_dock, nthreads);
}
/* Not true yet, but soon will be. We're going to release all
threads from the dock, and those that aren't part of the
team will exit. */
pool->threads_used = nthreads;
/* If necessary, expand the size of the gomp_threads array. It is
expected that changes in the number of threads are rare, thus we
make no effort to expand gomp_threads_size geometrically. */
if (nthreads >= pool->threads_size)
{
pool->threads_size = nthreads + 1;
pool->threads
= gomp_realloc (pool->threads,
pool->threads_size
* sizeof (struct gomp_thread *));
/* Add current (master) thread to threads[]. */
pool->threads[0] = thr;
}
/* Release existing idle threads. */
for (; i < n; ++i)
{
unsigned int place_partition_off = thr->ts.place_partition_off;
unsigned int place_partition_len = thr->ts.place_partition_len;
unsigned int place = 0;
if (__builtin_expect (gomp_places_list != NULL, 0))
{
switch (bind)
{
case omp_proc_bind_true:
case omp_proc_bind_close:
if (k == s)
{
++p;
if (p == (team->prev_ts.place_partition_off
+ team->prev_ts.place_partition_len))
p = team->prev_ts.place_partition_off;
k = 1;
if (i == nthreads - rest)
s = 1;
}
else
++k;
break;
case omp_proc_bind_master:
break;
case omp_proc_bind_spread:
if (k == 0)
{
/* T <= P. */
if (p < rest)
p += s + 1;
else
p += s;
if (p == (team->prev_ts.place_partition_off
+ team->prev_ts.place_partition_len))
p = team->prev_ts.place_partition_off;
place_partition_off = p;
if (p < rest)
place_partition_len = s + 1;
else
place_partition_len = s;
}
else
{
/* T > P. */
if (k == s)
{
++p;
if (p == (team->prev_ts.place_partition_off
+ team->prev_ts.place_partition_len))
p = team->prev_ts.place_partition_off;
k = 1;
if (i == nthreads - rest)
s = 1;
}
else
++k;
place_partition_off = p;
place_partition_len = 1;
}
break;
}
if (affinity_thr != NULL
|| (bind != omp_proc_bind_true
&& pool->threads[i]->place != p + 1)
|| pool->threads[i]->place <= place_partition_off
|| pool->threads[i]->place > (place_partition_off
+ place_partition_len))
{
unsigned int l;
force_display = true;
if (affinity_thr == NULL)
{
unsigned int j;
if (team->prev_ts.place_partition_len > 64)
affinity_thr
= gomp_malloc (team->prev_ts.place_partition_len
* sizeof (struct gomp_thread *));
else
affinity_thr
= gomp_alloca (team->prev_ts.place_partition_len
* sizeof (struct gomp_thread *));
memset (affinity_thr, '\0',
team->prev_ts.place_partition_len
* sizeof (struct gomp_thread *));
for (j = i; j < old_threads_used; j++)
{
if (pool->threads[j]->place
> team->prev_ts.place_partition_off
&& (pool->threads[j]->place
<= (team->prev_ts.place_partition_off
+ team->prev_ts.place_partition_len)))
{
l = pool->threads[j]->place - 1
- team->prev_ts.place_partition_off;
pool->threads[j]->data = affinity_thr[l];
affinity_thr[l] = pool->threads[j];
}
pool->threads[j] = NULL;
}
if (nthreads > old_threads_used)
memset (&pool->threads[old_threads_used],
'\0', ((nthreads - old_threads_used)
* sizeof (struct gomp_thread *)));
n = nthreads;
affinity_count = old_threads_used - i;
}
if (affinity_count == 0)
break;
l = p;
if (affinity_thr[l - team->prev_ts.place_partition_off]
== NULL)
{
if (bind != omp_proc_bind_true)
continue;
for (l = place_partition_off;
l < place_partition_off + place_partition_len;
l++)
if (affinity_thr[l - team->prev_ts.place_partition_off]
!= NULL)
break;
if (l == place_partition_off + place_partition_len)
continue;
}
nthr = affinity_thr[l - team->prev_ts.place_partition_off];
affinity_thr[l - team->prev_ts.place_partition_off]
= (struct gomp_thread *) nthr->data;
affinity_count--;
pool->threads[i] = nthr;
}
else
nthr = pool->threads[i];
place = p + 1;
}
else
nthr = pool->threads[i];
nthr->ts.team = team;
nthr->ts.work_share = &team->work_shares[0];
nthr->ts.last_work_share = NULL;
nthr->ts.team_id = i;
nthr->ts.level = team->prev_ts.level + 1;
nthr->ts.active_level = thr->ts.active_level;
nthr->ts.place_partition_off = place_partition_off;
nthr->ts.place_partition_len = place_partition_len;
#ifdef HAVE_SYNC_BUILTINS
nthr->ts.single_count = 0;
#endif
nthr->ts.static_trip = 0;
nthr->task = &team->implicit_task[i];
nthr->place = place;
gomp_init_task (nthr->task, task, icv);
team->implicit_task[i].icv.nthreads_var = nthreads_var;
team->implicit_task[i].icv.bind_var = bind_var;
nthr->task->taskgroup = taskgroup;
nthr->fn = fn;
nthr->data = data;
team->ordered_release[i] = &nthr->release;
}
if (__builtin_expect (affinity_thr != NULL, 0))
{
/* If AFFINITY_THR is non-NULL just because we had to
permute some threads in the pool, but we've managed
to find exactly as many old threads as we'd find
without affinity, we don't need to handle this
specially anymore. */
if (nthreads <= old_threads_used
? (affinity_count == old_threads_used - nthreads)
: (i == old_threads_used))
{
if (team->prev_ts.place_partition_len > 64)
free (affinity_thr);
affinity_thr = NULL;
affinity_count = 0;
}
else
{
i = 1;
/* We are going to compute the places/subpartitions
again from the beginning, so we need to reinitialize
the variables modified by the switch (bind) above inside
the loop to the state they had after the initial
switch (bind). */
switch (bind)
{
case omp_proc_bind_true:
case omp_proc_bind_close:
if (nthreads > thr->ts.place_partition_len)
/* T > P. S has been changed, so it needs
to be recomputed. */
s = nthreads / thr->ts.place_partition_len;
k = 1;
p = thr->place - 1;
break;
case omp_proc_bind_master:
/* No vars have been changed. */
break;
case omp_proc_bind_spread:
p = thr->ts.place_partition_off;
if (k != 0)
{
/* T > P. */
s = nthreads / team->prev_ts.place_partition_len;
k = 1;
}
break;
}
/* Increase the barrier threshold to make sure all new
threads and all the threads we're going to let die
arrive before the team is released. */
if (affinity_count)
gomp_simple_barrier_reinit (&pool->threads_dock,
nthreads + affinity_count);
}
}
if (i == nthreads)
goto do_release;
}
if (__builtin_expect (nthreads + affinity_count > old_threads_used, 0))
{
long diff = (long) (nthreads + affinity_count) - (long) old_threads_used;
if (old_threads_used == 0)
--diff;
#ifdef HAVE_SYNC_BUILTINS
__sync_fetch_and_add (&gomp_managed_threads, diff);
#else
gomp_mutex_lock (&gomp_managed_threads_lock);
gomp_managed_threads += diff;
gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
}
attr = &gomp_thread_attr;
if (__builtin_expect (gomp_places_list != NULL, 0))
{
size_t stacksize;
pthread_attr_init (&thread_attr);
if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize))
pthread_attr_setstacksize (&thread_attr, stacksize);
attr = &thread_attr;
}
start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
* (nthreads - i));
/* Launch new threads. */
for (; i < nthreads; ++i)
{
int err;
start_data->ts.place_partition_off = thr->ts.place_partition_off;
start_data->ts.place_partition_len = thr->ts.place_partition_len;
start_data->place = 0;
if (__builtin_expect (gomp_places_list != NULL, 0))
{
switch (bind)
{
case omp_proc_bind_true:
case omp_proc_bind_close:
if (k == s)
{
++p;
if (p == (team->prev_ts.place_partition_off
+ team->prev_ts.place_partition_len))
p = team->prev_ts.place_partition_off;
k = 1;
if (i == nthreads - rest)
s = 1;
}
else
++k;
break;
case omp_proc_bind_master:
break;
case omp_proc_bind_spread:
if (k == 0)
{
/* T <= P. */
if (p < rest)
p += s + 1;
else
p += s;
if (p == (team->prev_ts.place_partition_off
+ team->prev_ts.place_partition_len))
p = team->prev_ts.place_partition_off;
start_data->ts.place_partition_off = p;
if (p < rest)
start_data->ts.place_partition_len = s + 1;
else
start_data->ts.place_partition_len = s;
}
else
{
/* T > P. */
if (k == s)
{
++p;
if (p == (team->prev_ts.place_partition_off
+ team->prev_ts.place_partition_len))
p = team->prev_ts.place_partition_off;
k = 1;
if (i == nthreads - rest)
s = 1;
}
else
++k;
start_data->ts.place_partition_off = p;
start_data->ts.place_partition_len = 1;
}
break;
}
start_data->place = p + 1;
if (affinity_thr != NULL && pool->threads[i] != NULL)
continue;
gomp_init_thread_affinity (attr, p);
}
start_data->fn = fn;
start_data->fn_data = data;
start_data->ts.team = team;
start_data->ts.work_share = &team->work_shares[0];
start_data->ts.last_work_share = NULL;
start_data->ts.team_id = i;
start_data->ts.level = team->prev_ts.level + 1;
start_data->ts.active_level = thr->ts.active_level;
#ifdef HAVE_SYNC_BUILTINS
start_data->ts.single_count = 0;
#endif
start_data->ts.static_trip = 0;
start_data->task = &team->implicit_task[i];
gomp_init_task (start_data->task, task, icv);
team->implicit_task[i].icv.nthreads_var = nthreads_var;
team->implicit_task[i].icv.bind_var = bind_var;
start_data->task->taskgroup = taskgroup;
start_data->thread_pool = pool;
start_data->nested = nested;
attr = gomp_adjust_thread_attr (attr, &thread_attr);
err = pthread_create (&start_data->handle, attr, gomp_thread_start,
start_data);
start_data++;
if (err != 0)
gomp_fatal ("Thread creation failed: %s", strerror (err));
}
if (__builtin_expect (attr == &thread_attr, 0))
pthread_attr_destroy (&thread_attr);
do_release:
if (nested)
gomp_barrier_wait (&team->barrier);
else
gomp_simple_barrier_wait (&pool->threads_dock);
/* Decrease the barrier threshold to match the number of threads
that should arrive back at the end of this team. The extra
threads should be exiting. Note that we arrange for this test
to never be true for nested teams. If AFFINITY_COUNT is non-zero,
the barrier as well as gomp_managed_threads was temporarily
set to NTHREADS + AFFINITY_COUNT. For NTHREADS < OLD_THREADS_COUNT,
AFFINITY_COUNT, if non-zero, will always be at least
OLD_THREADS_COUNT - NTHREADS. */
if (__builtin_expect (nthreads < old_threads_used, 0)
|| __builtin_expect (affinity_count, 0))
{
long diff = (long) nthreads - (long) old_threads_used;
if (affinity_count)
diff = -affinity_count;
gomp_simple_barrier_reinit (&pool->threads_dock, nthreads);
#ifdef HAVE_SYNC_BUILTINS
__sync_fetch_and_add (&gomp_managed_threads, diff);
#else
gomp_mutex_lock (&gomp_managed_threads_lock);
gomp_managed_threads += diff;
gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
}
if (__builtin_expect (gomp_display_affinity_var, 0))
{
if (nested
|| nthreads != old_threads_used
|| force_display)
{
gomp_display_affinity_thread (gomp_thread_self (), &thr->ts,
thr->place);
if (nested)
{
start_data -= nthreads - 1;
for (i = 1; i < nthreads; ++i)
{
gomp_display_affinity_thread (
#ifdef LIBGOMP_USE_PTHREADS
start_data->handle,
#else
gomp_thread_self (),
#endif
&start_data->ts,
start_data->place);
start_data++;
}
}
else
{
for (i = 1; i < nthreads; ++i)
{
gomp_thread_handle handle
= gomp_thread_to_pthread_t (pool->threads[i]);
gomp_display_affinity_thread (handle, &pool->threads[i]->ts,
pool->threads[i]->place);
}
}
}
}
if (__builtin_expect (affinity_thr != NULL, 0)
&& team->prev_ts.place_partition_len > 64)
free (affinity_thr);
}
#endif
/* Terminate the current team. This is only to be called by the master
thread. We assume that we must wait for the other threads. */
void
gomp_team_end (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
/* This barrier handles all pending explicit threads.
Because #pragma omp cancel parallel might leave the awaited count in
team->barrier in an inconsistent state, we need to use a different
counter here. */
gomp_team_barrier_wait_final (&team->barrier);
if (__builtin_expect (team->team_cancelled, 0))
{
struct gomp_work_share *ws = team->work_shares_to_free;
do
{
struct gomp_work_share *next_ws = gomp_ptrlock_get (&ws->next_ws);
if (next_ws == NULL)
gomp_ptrlock_set (&ws->next_ws, ws);
gomp_fini_work_share (ws);
ws = next_ws;
}
while (ws != NULL);
}
else
gomp_fini_work_share (thr->ts.work_share);
gomp_end_task ();
thr->ts = team->prev_ts;
if (__builtin_expect (thr->ts.level != 0, 0))
{
#ifdef HAVE_SYNC_BUILTINS
__sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads);
#else
gomp_mutex_lock (&gomp_managed_threads_lock);
gomp_managed_threads -= team->nthreads - 1L;
gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
/* This barrier has gomp_barrier_wait_last counterparts
and ensures the team can be safely destroyed. */
gomp_barrier_wait (&team->barrier);
}
if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0))
{
struct gomp_work_share *ws = team->work_shares[0].next_alloc;
do
{
struct gomp_work_share *next_ws = ws->next_alloc;
free (ws);
ws = next_ws;
}
while (ws != NULL);
}
gomp_sem_destroy (&team->master_release);
if (__builtin_expect (thr->ts.team != NULL, 0)
|| __builtin_expect (team->nthreads == 1, 0))
free_team (team);
else
{
struct gomp_thread_pool *pool = thr->thread_pool;
if (pool->last_team)
free_team (pool->last_team);
pool->last_team = team;
gomp_release_thread_pool (pool);
}
}
#ifdef LIBGOMP_USE_PTHREADS
/* Constructors for this file. */
static void __attribute__((constructor))
initialize_team (void)
{
#if !defined HAVE_TLS && !defined USE_EMUTLS
static struct gomp_thread initial_thread_tls_data;
pthread_key_create (&gomp_tls_key, NULL);
pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
#endif
if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0)
gomp_fatal ("could not create thread pool destructor.");
}
static void __attribute__((destructor))
team_destructor (void)
{
/* Without this, dlclose on libgomp could lead to subsequent
crashes. */
pthread_key_delete (gomp_thread_destructor);
}
/* Similar to gomp_free_pool_helper, but the threads do not detach
themselves; gomp_pause_host will pthread_join them. */
static void
gomp_pause_pool_helper (void *thread_pool)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_thread_pool *pool
= (struct gomp_thread_pool *) thread_pool;
gomp_simple_barrier_wait_last (&pool->threads_dock);
gomp_sem_destroy (&thr->release);
thr->thread_pool = NULL;
thr->task = NULL;
pthread_exit (NULL);
}
/* Free a thread pool and release its threads. Return non-zero on
failure. */
int
gomp_pause_host (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_thread_pool *pool = thr->thread_pool;
if (thr->ts.level)
return -1;
if (pool)
{
if (pool->threads_used > 0)
{
int i;
pthread_t *thrs
= gomp_alloca (sizeof (pthread_t) * pool->threads_used);
for (i = 1; i < pool->threads_used; i++)
{
struct gomp_thread *nthr = pool->threads[i];
nthr->fn = gomp_pause_pool_helper;
nthr->data = pool;
thrs[i] = gomp_thread_to_pthread_t (nthr);
}
/* This barrier undocks threads docked on pool->threads_dock. */
gomp_simple_barrier_wait (&pool->threads_dock);
/* And this waits till all threads have called gomp_barrier_wait_last
in gomp_pause_pool_helper. */
gomp_simple_barrier_wait (&pool->threads_dock);
/* Now it is safe to destroy the barrier and free the pool. */
gomp_simple_barrier_destroy (&pool->threads_dock);
#ifdef HAVE_SYNC_BUILTINS
__sync_fetch_and_add (&gomp_managed_threads,
1L - pool->threads_used);
#else
gomp_mutex_lock (&gomp_managed_threads_lock);
gomp_managed_threads -= pool->threads_used - 1L;
gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
for (i = 1; i < pool->threads_used; i++)
pthread_join (thrs[i], NULL);
}
if (pool->last_team)
free_team (pool->last_team);
#ifndef __nvptx__
team_free (pool->threads);
team_free (pool);
#endif
thr->thread_pool = NULL;
}
return 0;
}
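/* Editor's note: gomp_pause_host is the host-side worker behind the
OpenMP 5.0 pause API. A hypothetical caller-side sketch (API names
taken from the OpenMP specification, not from this file): */
#if 0
#include <omp.h>
int
release_host_threads (void)
{
/* Fails (returns non-zero) when called inside a parallel region,
mirroring the thr->ts.level check above. */
return omp_pause_resource_all (omp_pause_soft);
}
#endif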
#endif
struct gomp_task_icv *
gomp_new_icv (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task));
gomp_init_task (task, NULL, &gomp_global_icv);
thr->task = task;
#ifdef LIBGOMP_USE_PTHREADS
pthread_setspecific (gomp_thread_destructor, thr);
#endif
return &task->icv;
}
|
csr_matvec.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
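/* Editor's note: a worked instance of the contract above, assuming a
hypothetical 2x2 CSR matrix A = [[4,1],[0,2]], x = (1,1), b = (1,1),
alpha = 1, beta = -1, offset = 0:
y[0] = 1*(4*1 + 1*1) + (-1)*1 = 4
y[1] = 1*(0*1 + 2*1) + (-1)*1 = 1 */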
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlaceHost( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *b,
hypre_Vector *y,
HYPRE_Int offset )
{
#ifdef HYPRE_PROFILE
/* time_begin is referenced by the early-return path below. */
HYPRE_Real time_begin = hypre_MPI_Wtime();
#endif
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A) + offset;
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A) - offset;
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
/*HYPRE_Int num_nnz = hypre_CSRMatrixNumNonzeros(A);*/
HYPRE_Int *A_rownnz = hypre_CSRMatrixRownnz(A);
HYPRE_Int num_rownnz = hypre_CSRMatrixNumRownnz(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *b_data = hypre_VectorData(b) + offset;
HYPRE_Complex *y_data = hypre_VectorData(y) + offset;
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int b_size = hypre_VectorSize(b) - offset;
HYPRE_Int y_size = hypre_VectorSize(y) - offset;
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
HYPRE_Int idxstride_y = hypre_VectorIndexStride(y);
HYPRE_Int vecstride_y = hypre_VectorVectorStride(y);
/*HYPRE_Int idxstride_b = hypre_VectorIndexStride(b);
HYPRE_Int vecstride_b = hypre_VectorVectorStride(b);*/
HYPRE_Int idxstride_x = hypre_VectorIndexStride(x);
HYPRE_Int vecstride_x = hypre_VectorVectorStride(x);
HYPRE_Complex temp, tempx;
HYPRE_Int i, j, jj, m, ierr = 0;
HYPRE_Real xpar = 0.7;
hypre_Vector *x_tmp = NULL;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert(num_vectors == hypre_VectorNumVectors(y));
hypre_assert(num_vectors == hypre_VectorNumVectors(b));
if (num_cols != x_size)
{
ierr = 1;
}
if (num_rows != y_size || num_rows != b_size)
{
ierr = 2;
}
if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
{
ierr = 3;
}
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows * num_vectors; i++)
{
y_data[i] = beta * b_data[i];
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
return ierr;
}
if (x == y)
{
x_tmp = hypre_SeqVectorCloneDeep(x);
x_data = hypre_VectorData(x_tmp);
}
temp = beta / alpha;
if (num_vectors > 1)
{
/*-----------------------------------------------------------------------
* y = (beta/alpha)*b
*-----------------------------------------------------------------------*/
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows * num_vectors; i++)
{
y_data[i] = 0.0;
}
}
else if (temp == 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows * num_vectors; i++)
{
y_data[i] = b_data[i];
}
}
else if (temp == -1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows * num_vectors; i++)
{
y_data[i] = -b_data[i];
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows * num_vectors; i++)
{
y_data[i] = temp * b_data[i];
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
if (num_rownnz < xpar * num_rows)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
for (j = 0; j < num_vectors; j++)
{
tempx = 0.0;
for (jj = A_i[m]; jj < A_i[m + 1]; jj++)
{
tempx += A_data[jj] * x_data[j * vecstride_x + A_j[jj] * idxstride_x];
}
y_data[j * vecstride_y + m * idxstride_y] += tempx;
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
for (j = 0; j < num_vectors; ++j)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx += A_data[jj] * x_data[j * vecstride_x + A_j[jj] * idxstride_x];
}
y_data[j * vecstride_y + i * idxstride_y] += tempx;
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows * num_vectors; i++)
{
y_data[i] *= alpha;
}
}
}
else if (num_rownnz < xpar * num_rows)
{
/* use rownnz pointer to do the A*x multiplication when
num_rownnz is smaller than xpar*num_rows */
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
y_data[i] = 0.0;
}
if (alpha == 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[m] = tempx;
}
} // y = A*x
else if (alpha == -1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx -= A_data[j] * x_data[A_j[j]];
}
y_data[m] = tempx;
}
} // y = -A*x
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[m] = alpha * tempx;
}
} // y = alpha*A*x
} // temp == 0
else if (temp == -1.0) // beta == -alpha
{
if (alpha == 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
y_data[i] = -b_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[m] += tempx;
}
} // y = A*x - b
else if (alpha == -1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
y_data[i] = b_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[m] -= tempx;
}
} // y = -A*x + b
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
y_data[i] = -alpha * b_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[m] += alpha * tempx;
}
} // y = alpha*(A*x - b)
} // temp == -1
else if (temp == 1.0) // beta == alpha
{
if (alpha == 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
y_data[i] = b_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[m] += tempx;
}
} // y = A*x + b
else if (alpha == -1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
y_data[i] = -b_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx -= A_data[j] * x_data[A_j[j]];
}
y_data[m] += tempx;
}
} // y = -A*x - b
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
y_data[i] = alpha * b_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[m] += alpha * tempx;
}
} // y = alpha*(A*x + b)
}
else
{
if (alpha == 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
y_data[i] = beta * b_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[m] += tempx;
}
} // y = A*x + beta*b
else if (-1 == alpha)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
y_data[i] = -temp * b_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx -= A_data[j] * x_data[A_j[j]];
}
y_data[m] += tempx;
}
} // y = -A*x - temp*b
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
y_data[i] = beta * b_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
tempx = 0.0;
for (j = A_i[m]; j < A_i[m + 1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[m] += alpha * tempx;
}
} // y = alpha*(A*x + temp*b)
} // temp != 0 && temp != -1 && temp != 1
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,tempx)
#endif
{
HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);
HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);
hypre_assert(iBegin <= iEnd);
hypre_assert(iBegin >= 0 && iBegin <= num_rows);
hypre_assert(iEnd >= 0 && iEnd <= num_rows);
if (temp == 0.0)
{
if (alpha == 1.0) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x
else if (alpha == -1.0)
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha * tempx;
}
} // y = alpha*A*x
} // temp == 0
else if (temp == -1.0) // beta == -alpha
{
if (alpha == 1.0) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
y_data[i] = -b_data[i];
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] += tempx;
}
} // y = A*x - b
else if (alpha == -1.0) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
y_data[i] = b_data[i];
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] += tempx;
}
} // y = -A*x + b
else
{
for (i = iBegin; i < iEnd; i++)
{
y_data[i] = -alpha * b_data[i];
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] += alpha * tempx;
}
} // y = alpha*(A*x - b)
} // temp == -1
else if (temp == 1.0)
{
if (alpha == 1.0) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
y_data[i] = b_data[i];
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] += tempx;
}
} // y = A*x + b
else if (alpha == -1.0)
{
for (i = iBegin; i < iEnd; i++)
{
y_data[i] = -b_data[i];
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] += tempx;
}
} // y = -A*x - b
else
{
for (i = iBegin; i < iEnd; i++)
{
y_data[i] = alpha * b_data[i];
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] += alpha * tempx;
}
} // y = alpha*(A*x + b)
}
else
{
if (alpha == 1.0) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
y_data[i] = b_data[i] * temp;
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] += tempx;
}
} // y = A*x + temp*b
else if (alpha == -1.0)
{
for (i = iBegin; i < iEnd; i++)
{
y_data[i] = -b_data[i] * temp;
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] += tempx;
}
} // y = -A*x - temp*b
else
{
for (i = iBegin; i < iEnd; i++)
{
y_data[i] = b_data[i] * beta;
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] += alpha * tempx;
}
} // y = alpha*(A*x + temp*b)
} // temp != 0 && temp != -1 && temp != 1
} // omp parallel
}
if (x == y)
{
hypre_SeqVectorDestroy(x_tmp);
}
return ierr;
}
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *b,
hypre_Vector *y,
HYPRE_Int offset )
{
#ifdef HYPRE_PROFILE
HYPRE_Real time_begin = hypre_MPI_Wtime();
#endif
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_GPU)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );
if (exec == HYPRE_EXEC_DEVICE)
{
ierr = hypre_CSRMatrixMatvecDevice(0, alpha, A, x, beta, b, y, offset);
}
else
#endif
{
ierr = hypre_CSRMatrixMatvecOutOfPlaceHost(alpha, A, x, beta, b, y, offset);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
return ierr;
}
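/* Editor's note: a usage sketch (not part of the original file) for the
out-of-place matvec, realising the worked example above; the
hypre_CSRMatrixCreate / hypre_SeqVectorCreate helpers are assumed from
hypre's seq_mv API. */
#if 0
static void
matvec_sketch (void)
{
/* Build the 2x2 CSR matrix A = [[4,1],[0,2]] (3 nonzeros). */
hypre_CSRMatrix *A = hypre_CSRMatrixCreate(2, 2, 3);
hypre_CSRMatrixInitialize(A);
hypre_CSRMatrixI(A)[0] = 0; hypre_CSRMatrixI(A)[1] = 2; hypre_CSRMatrixI(A)[2] = 3;
hypre_CSRMatrixJ(A)[0] = 0; hypre_CSRMatrixJ(A)[1] = 1; hypre_CSRMatrixJ(A)[2] = 1;
hypre_CSRMatrixData(A)[0] = 4.0; hypre_CSRMatrixData(A)[1] = 1.0; hypre_CSRMatrixData(A)[2] = 2.0;
hypre_Vector *x = hypre_SeqVectorCreate(2);
hypre_Vector *y = hypre_SeqVectorCreate(2);
hypre_SeqVectorInitialize(x);
hypre_SeqVectorInitialize(y);
hypre_SeqVectorSetConstantValues(x, 1.0);
hypre_SeqVectorSetConstantValues(y, 1.0);
/* y = 1*A*x - 1*y -> y = (4, 1), matching the worked example. */
hypre_CSRMatrixMatvecOutOfPlace(1.0, A, x, -1.0, y, y, 0);
hypre_SeqVectorDestroy(x);
hypre_SeqVectorDestroy(y);
hypre_CSRMatrixDestroy(A);
}
#endif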
HYPRE_Int
hypre_CSRMatrixMatvec( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
return hypre_CSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y, 0);
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
* This version uses a different (more efficient) threading scheme
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixMatvecTHost( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int y_size = hypre_VectorSize(y);
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
HYPRE_Int idxstride_y = hypre_VectorIndexStride(y);
HYPRE_Int vecstride_y = hypre_VectorVectorStride(y);
HYPRE_Int idxstride_x = hypre_VectorIndexStride(x);
HYPRE_Int vecstride_x = hypre_VectorVectorStride(x);
HYPRE_Complex temp;
HYPRE_Complex *y_data_expand;
HYPRE_Int my_thread_num = 0, offset = 0;
HYPRE_Int i, j, jv, jj;
HYPRE_Int num_threads;
HYPRE_Int ierr = 0;
hypre_Vector *x_tmp = NULL;
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_rows != x_size)
{
ierr = 1;
}
if (num_cols != y_size)
{
ierr = 2;
}
if (num_rows != x_size && num_cols != y_size)
{
ierr = 3;
}
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols * num_vectors; i++)
{
y_data[i] *= beta;
}
return ierr;
}
if (x == y)
{
x_tmp = hypre_SeqVectorCloneDeep(x);
x_data = hypre_VectorData(x_tmp);
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols * num_vectors; i++)
{
y_data[i] = 0.0;
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols * num_vectors; i++)
{
y_data[i] *= temp;
}
}
}
/*-----------------------------------------------------------------
* y += A^T*x
*-----------------------------------------------------------------*/
num_threads = hypre_NumThreads();
if (num_threads > 1)
{
y_data_expand = hypre_CTAlloc(HYPRE_Complex, num_threads * y_size, HYPRE_MEMORY_HOST);
if ( num_vectors == 1 )
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,j,my_thread_num,offset)
#endif
{
my_thread_num = hypre_GetThreadNum();
offset = y_size * my_thread_num;
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
j = A_j[jj];
y_data_expand[offset + j] += A_data[jj] * x_data[i];
}
}
/* implied barrier (for threads)*/
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < y_size; i++)
{
for (j = 0; j < num_threads; j++)
{
y_data[i] += y_data_expand[j * y_size + i];
}
}
} /* end parallel threaded region */
}
else
{
/* multiple vector case is not threaded */
for (i = 0; i < num_rows; i++)
{
for ( jv = 0; jv < num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
j = A_j[jj];
y_data[ j * idxstride_y + jv * vecstride_y ] +=
A_data[jj] * x_data[ i * idxstride_x + jv * vecstride_x];
}
}
}
}
hypre_TFree(y_data_expand, HYPRE_MEMORY_HOST);
}
else
{
for (i = 0; i < num_rows; i++)
{
if ( num_vectors == 1 )
{
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
j = A_j[jj];
y_data[j] += A_data[jj] * x_data[i];
}
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
{
j = A_j[jj];
y_data[ j * idxstride_y + jv * vecstride_y ] +=
A_data[jj] * x_data[ i * idxstride_x + jv * vecstride_x ];
}
}
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols * num_vectors; i++)
{
y_data[i] *= alpha;
}
}
if (x == y)
{
hypre_SeqVectorDestroy(x_tmp);
}
return ierr;
}
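/* Editor's note: a self-contained sketch (not hypre code) of the
thread-expanded accumulation used in hypre_CSRMatrixMatvecTHost above:
each thread scatters into a private copy of y, then the copies are
summed. This avoids atomics on y[j] when rows are split across
threads. */
#if 0
#include <omp.h>
#include <stdlib.h>
static void
transpose_matvec_pattern (int nrows, int ncols, const int *Ai, const int *Aj,
const double *Ax, const double *x, double *y)
{
int nt = omp_get_max_threads ();
double *expand = calloc ((size_t) nt * ncols, sizeof (double));
#pragma omp parallel
{
double *mine = expand + (size_t) omp_get_thread_num () * ncols;
#pragma omp for
for (int i = 0; i < nrows; i++)
for (int jj = Ai[i]; jj < Ai[i + 1]; jj++)
mine[Aj[jj]] += Ax[jj] * x[i]; /* race-free: private copy */
/* implicit barrier between the two omp for loops */
#pragma omp for
for (int j = 0; j < ncols; j++) /* reduce the copies into y */
for (int t = 0; t < nt; t++)
y[j] += expand[(size_t) t * ncols + j];
}
free (expand);
}
#endif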
HYPRE_Int
hypre_CSRMatrixMatvecT( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
HYPRE_Real time_begin = hypre_MPI_Wtime();
#endif
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_GPU)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );
if (exec == HYPRE_EXEC_DEVICE)
{
ierr = hypre_CSRMatrixMatvecDevice(1, alpha, A, x, beta, y, y, 0 );
}
else
#endif
{
ierr = hypre_CSRMatrixMatvecTHost(alpha, A, x, beta, y);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixMatvec_FF( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y,
HYPRE_Int *CF_marker_x,
HYPRE_Int *CF_marker_y,
HYPRE_Int fpt )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int y_size = hypre_VectorSize(y);
HYPRE_Complex temp;
HYPRE_Int i, jj;
HYPRE_Int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
if (num_cols != x_size)
{
ierr = 1;
}
if (num_rows != y_size)
{
ierr = 2;
}
if (num_cols != x_size && num_rows != y_size)
{
ierr = 3;
}
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) { y_data[i] *= beta; }
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) { y_data[i] = 0.0; }
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) { y_data[i] *= temp; }
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
if (CF_marker_x[i] == fpt)
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
if (CF_marker_y[A_j[jj]] == fpt) { temp += A_data[jj] * x_data[A_j[jj]]; }
y_data[i] = temp;
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) { y_data[i] *= alpha; }
}
return ierr;
}
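/* Editor's note: in the _FF variant above the CF markers gate both rows
and columns: only rows with CF_marker_x[i] == fpt are updated, and only
columns with CF_marker_y[A_j[jj]] == fpt contribute to the sum. In
hypre's usual CF convention fpt is the F-point tag, typically -1 (an
assumption; this file does not fix the value). */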
|
PatchSelect_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC and Diamond Light Source Ltd.
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
* Copyright 2018 Diamond Light Source Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "PatchSelect_core.h"
/* C-OMP implementation of non-local weight pre-calculation for non-local priors
* Weights and associated indices are stored into pre-allocated arrays and passed
* to the regulariser
*
*
* Input Parameters:
* 1. 2D/3D grayscale image/volume
* 2. Searching window (half-size of the main, larger search window, e.g. 11)
* 3. Similarity window (half-size of the patch window, e.g. 2)
* 4. The number of neighbours to take (the most prominent neighbours after sorting will be taken)
* 5. Noise-related parameter used to calculate the non-local weights
*
* Output [2D]:
* 1. AR_i - indices of i neighbours
* 2. AR_j - indices of j neighbours
* 3. Weights_ij - associated weights
*
* Output [3D]:
* 1. AR_i - indices of i neighbours
* 2. AR_j - indices of j neighbours
* 3. AR_k - indices of k neighbours
* 4. Weights_ijk - associated weights
*/
void swap(float *xp, float *yp)
{
float temp = *xp;
*xp = *yp;
*yp = temp;
}
void swapUS(unsigned short *xp, unsigned short *yp)
{
unsigned short temp = *xp;
*xp = *yp;
*yp = temp;
}
/**************************************************/
float PatchSelect_CPU_main(float *A, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, int dimX, int dimY, int dimZ, int SearchWindow, int SimilarWin, int NumNeighb, float h)
{
int counterG;
long i, j, k;
float *Eucl_Vec, h2;
h2 = h*h;
/****************2D INPUT ***************/
if (dimZ == 0) {
/* generate a 2D Gaussian kernel for NLM procedure */
Eucl_Vec = (float*) calloc ((2*SimilarWin+1)*(2*SimilarWin+1),sizeof(float));
counterG = 0;
for(i=-SimilarWin; i<=SimilarWin; i++) {
for(j=-SimilarWin; j<=SimilarWin; j++) {
Eucl_Vec[counterG] = expf(-(i*i+j*j)/(2.0f*SimilarWin*SimilarWin));
counterG++;
}} /*main neighb loop */
/* for each pixel store indices of the most similar neighbours (patches) */
#pragma omp parallel for shared (A, Weights, H_i, H_j) private(i,j)
for(j=0; j<(long)(dimY); j++) {
for(i=0; i<(long)(dimX); i++) {
Indeces2D(A, H_i, H_j, Weights, i, j, (long)(dimX), (long)(dimY), Eucl_Vec, NumNeighb, SearchWindow, SimilarWin, h2);
}}
}
else {
/****************3D INPUT ***************/
/* generate a 3D Gaussian kernel for NLM procedure */
Eucl_Vec = (float*) calloc ((2*SimilarWin+1)*(2*SimilarWin+1)*(2*SimilarWin+1),sizeof(float));
counterG = 0;
for(i=-SimilarWin; i<=SimilarWin; i++) {
for(j=-SimilarWin; j<=SimilarWin; j++) {
for(k=-SimilarWin; k<=SimilarWin; k++) {
Eucl_Vec[counterG] = expf(-(i*i+j*j+k*k)/(2.0f*SimilarWin*SimilarWin*SimilarWin));
counterG++;
}}} /*main neighb loop */
/* for each voxel store indices of the most similar neighbours (patches) */
#pragma omp parallel for shared (A, Weights, H_i, H_j, H_k) private(i,j,k)
for(k=0; k<dimZ; k++) {
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
Indeces3D(A, H_i, H_j, H_k, Weights, i, j, k, (long)(dimX), (long)(dimY), (long)(dimZ), Eucl_Vec, NumNeighb, SearchWindow, SimilarWin, h2);
}}}
}
free(Eucl_Vec);
return 1;
}
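/* Editor's note: a hypothetical driver for the 2D path above; the array
sizes follow the indexing used in Indeces2D (NumNeighb planes of
dimX*dimY), and the window half-sizes are the examples given in the
header comment. */
#if 0
#include <stdlib.h>
static void
patchselect_sketch (float *image, int dimX, int dimY)
{
int NumNeighb = 20; /* neighbours kept per pixel */
size_t plane = (size_t)dimX * (size_t)dimY;
unsigned short *H_i = calloc(plane * NumNeighb, sizeof(unsigned short));
unsigned short *H_j = calloc(plane * NumNeighb, sizeof(unsigned short));
float *Weights = calloc(plane * NumNeighb, sizeof(float));
/* dimZ = 0 selects the 2D branch; H_k is unused there. */
PatchSelect_CPU_main(image, H_i, H_j, NULL, Weights,
dimX, dimY, 0, 11, 2, NumNeighb, 0.1f);
free(H_i); free(H_j); free(Weights);
}
#endif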
float Indeces2D(float *Aorig, unsigned short *H_i, unsigned short *H_j, float *Weights, long i, long j, long dimX, long dimY, float *Eucl_Vec, int NumNeighb, int SearchWindow, int SimilarWin, float h2)
{
long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, index, sizeWin_tot, counterG;
float *Weight_Vec, normsum;
unsigned short *ind_i, *ind_j;
sizeWin_tot = (2*SearchWindow + 1)*(2*SearchWindow + 1);
Weight_Vec = (float*) calloc(sizeWin_tot, sizeof(float));
ind_i = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
ind_j = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
counter = 0;
for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) {
for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) {
i1 = i+i_m;
j1 = j+j_m;
if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
normsum = 0.0f; counterG = 0;
for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) {
for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) {
i2 = i1 + i_c;
j2 = j1 + j_c;
i3 = i + i_c;
j3 = j + j_c;
if (((i2 >= 0) && (i2 < dimX)) && ((j2 >= 0) && (j2 < dimY))) {
if (((i3 >= 0) && (i3 < dimX)) && ((j3 >= 0) && (j3 < dimY))) {
normsum += Eucl_Vec[counterG]*powf(Aorig[j3*dimX + (i3)] - Aorig[j2*dimX + (i2)], 2);
counterG++;
}}
}}
/* writing temporarily into vectors */
if (normsum > EPS) {
Weight_Vec[counter] = expf(-normsum/h2);
ind_i[counter] = i1;
ind_j[counter] = j1;
counter++;
}
}
}}
/* do sorting to choose the most prominent weights [HIGH to LOW] */
/* and re-arrange indices accordingly */
for (x = 0; x < counter-1; x++) {
for (y = 0; y < counter-x-1; y++) {
if (Weight_Vec[y] < Weight_Vec[y+1]) {
swap(&Weight_Vec[y], &Weight_Vec[y+1]);
swapUS(&ind_i[y], &ind_i[y+1]);
swapUS(&ind_j[y], &ind_j[y+1]);
}
}
}
/*sorting loop finished*/
/*now select the NumNeighb most prominent weights and store into pre-allocated arrays */
for(x=0; x < NumNeighb; x++) {
index = (dimX*dimY*x) + j*dimX+i;
H_i[index] = ind_i[x];
H_j[index] = ind_j[x];
Weights[index] = Weight_Vec[x];
}
free(ind_i);
free(ind_j);
free(Weight_Vec);
return 1;
}
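/* Editor's note: the weight stored by Indeces2D above is the standard
non-local-means kernel,
w(p,q) = expf(-normsum/h2), normsum = sum_g Eucl_Vec[g]*(Aorig[p+g]-Aorig[q+g])^2,
i.e. a Gaussian-windowed patch distance scaled by 1/h^2. */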
float Indeces3D(float *Aorig, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, long i, long j, long k, long dimX, long dimY, long dimZ, float *Eucl_Vec, int NumNeighb, int SearchWindow, int SimilarWin, float h2)
{
long i1, j1, k1, i_m, j_m, k_m, i_c, j_c, k_c, i2, j2, k2, i3, j3, k3, counter, x, y, index, sizeWin_tot, counterG;
float *Weight_Vec, normsum, val;
unsigned short *ind_i, *ind_j, *ind_k;
sizeWin_tot = (2*SearchWindow + 1)*(2*SearchWindow + 1)*(2*SearchWindow + 1);
Weight_Vec = (float*) calloc(sizeWin_tot, sizeof(float));
ind_i = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
ind_j = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
ind_k = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
counter = 0l;
for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) {
for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) {
for(k_m=-SearchWindow; k_m<=SearchWindow; k_m++) {
k1 = k+k_m;
i1 = i+i_m;
j1 = j+j_m;
if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY)) && ((k1 >= 0) && (k1 < dimZ))) {
normsum = 0.0f; counterG = 0l;
for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) {
for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) {
for(k_c=-SimilarWin; k_c<=SimilarWin; k_c++) {
i2 = i1 + i_c;
j2 = j1 + j_c;
k2 = k1 + k_c;
i3 = i + i_c;
j3 = j + j_c;
k3 = k + k_c;
if (((i2 >= 0) && (i2 < dimX)) && ((j2 >= 0) && (j2 < dimY)) && ((k2 >= 0) && (k2 < dimZ))) {
if (((i3 >= 0) && (i3 < dimX)) && ((j3 >= 0) && (j3 < dimY)) && ((k3 >= 0) && (k3 < dimZ))) {
val = Aorig[(dimX*dimY*k3) + j3*dimX + (i3)] - Aorig[(dimX*dimY*k2) + j2*dimX + (i2)];
normsum += Eucl_Vec[counterG]*val*val;
counterG++;
}}
}}}
/* writing temporarily into vectors */
if (normsum > EPS) {
Weight_Vec[counter] = expf(-normsum/h2);
ind_i[counter] = i1;
ind_j[counter] = j1;
ind_k[counter] = k1;
counter ++;
}
}
}}}
/* do sorting to choose the most prominent weights [HIGH to LOW] */
/* and re-arrange indices accordingly */
for (x = 0; x < counter-1; x++) {
for (y = 0; y < counter-x-1; y++) {
if (Weight_Vec[y] < Weight_Vec[y+1]) {
swap(&Weight_Vec[y], &Weight_Vec[y+1]);
swapUS(&ind_i[y], &ind_i[y+1]);
swapUS(&ind_j[y], &ind_j[y+1]);
swapUS(&ind_k[y], &ind_k[y+1]);
}
}
}
/*sorting loop finished*/
/*now select the NumNeighb most prominent weights and store into arrays */
for(x=0; x < NumNeighb; x++) {
index = dimX*dimY*dimZ*x + (dimX*dimY*k) + j*dimX+i;
H_i[index] = ind_i[x];
H_j[index] = ind_j[x];
H_k[index] = ind_k[x];
Weights[index] = Weight_Vec[x];
}
free(ind_i);
free(ind_j);
free(ind_k);
free(Weight_Vec);
return 1;
}
|