simd-14.c | /* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
int
main ()
{
int i, j, b, c = 0;
i = 4; j = 4; b = 7;
#pragma omp simd linear(b:2) reduction(+:c)
for (i = 0; i < 64; i++)
{
c = c + (b != 7 + 2 * i);
b = b + 2;
}
if (c || i != 64 || b != 7 + 64 * 2)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp simd linear(b:3) reduction(+:c)
for (i = 0; i < 64; i += 4)
{
c = c + (b != 7 + i / 4 * 3);
b = b + 3;
}
if (c || i != 64 || b != 7 + 16 * 3)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp simd linear(i) linear(b:2) reduction(+:c)
for (i = 0; i < 64; i++)
{
c = c + (b != 7 + 2 * i);
b = b + 2;
}
if (c || i != 64 || b != 7 + 64 * 2)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp simd linear(i:4) linear(b:3) reduction(+:c)
for (i = 0; i < 64; i += 4)
{
c = c + (b != 7 + i / 4 * 3);
b = b + 3;
}
if (c || i != 64 || b != 7 + 16 * 3)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp simd collapse (2) linear(b:2) reduction(+:c)
for (i = 0; i < 8; i++)
for (j = 0; j < 8; j++)
{
c = c + (b != 7 + 2 * j + 2 * 8 * i);
b = b + 2;
}
if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp simd collapse (2) lastprivate (i, j) linear(b:2) reduction(+:c)
for (i = 0; i < 8; i++)
for (j = 0; j < 8; j++)
{
c = c + (b != 7 + 2 * j + 2 * 8 * i);
b = b + 2;
}
if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp parallel for simd schedule (static, 4) linear(b:2) reduction(+:c)
for (i = 0; i < 64; i++)
{
c = c + (b != 7 + 2 * i);
b = b + 2;
}
if (c || i != 64 || b != 7 + 64 * 2)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp parallel for simd schedule (static, 4) linear(b:3) reduction(+:c)
for (i = 0; i < 64; i += 4)
{
c = c + (b != 7 + i / 4 * 3);
b = b + 3;
}
if (c || i != 64 || b != 7 + 16 * 3)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp parallel for simd schedule (static, 4) linear(i) linear(b:2) reduction(+:c)
for (i = 0; i < 64; i++)
{
c = c + (b != 7 + 2 * i);
b = b + 2;
}
if (c || i != 64 || b != 7 + 64 * 2)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp parallel for simd schedule (static, 4) linear(i:4) linear(b:3) reduction(+:c)
for (i = 0; i < 64; i += 4)
{
c = c + (b != 7 + i / 4 * 3);
b = b + 3;
}
if (c || i != 64 || b != 7 + 16 * 3)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp parallel for simd lastprivate (i, j) collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
for (i = 0; i < 8; i++)
for (j = 0; j < 8; j++)
{
c = c + (b != 7 + 2 * j + 2 * 8 * i);
b = b + 2;
}
if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
__builtin_abort ();
i = 4; j = 4; b = 7;
#pragma omp parallel for simd collapse (2) schedule (static, 4) linear(b:2) reduction(+:c)
for (i = 0; i < 8; i++)
for (j = 0; j < 8; j++)
{
c = c + (b != 7 + 2 * j + 2 * 8 * i);
b = b + 2;
}
if (c || i != 8 || j != 8 || b != 7 + 64 * 2)
__builtin_abort ();
return 0;
}
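/* Note on the clauses exercised above: linear(b:2) asserts that b is a
   linear function of the logical iteration number with step 2 (b == b_init
   + 2*iter inside the loop, and b_init + 2*niters after it), while
   reduction(+:c) gives each SIMD lane a private c that is summed at the
   end; that is why c must still be 0 after every loop. */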
|
GB_unop__identity_int8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_uint8)
// op(A') function: GB (_unop_tran__identity_int8_uint8)
// C type: int8_t
// A type: uint8_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = (int8_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int8_uint8)
(
int8_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int8_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openmp.c | /******************************************************************************
* FILE: omp_hello.c
* DESCRIPTION:
* OpenMP Example - Hello World - C/C++ Version
* In this simple example, the master thread forks a parallel region.
* All threads in the team obtain their unique thread number and print it.
* The master thread only prints the total number of threads. Two OpenMP
* library routines are used to obtain the number of threads and each
* thread's number.
* ORIGINAL AUTHOR: Blaise Barney
* LAST REVISED: 11/05/19
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main (int argc, char *argv[]){
int nthreads, tid;
/* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel private(nthreads, tid)
{
tid = omp_get_thread_num(); /* Obtain thread number */
printf("Hello World from thread = %d\n", tid);
/* Only master thread does this */
if (tid == 0) {
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
} /* All threads join master thread and disband */
}
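/* To build and run (any OpenMP-capable compiler works; the flag shown is
   GCC's):
     gcc -fopenmp omp_hello.c -o omp_hello
     OMP_NUM_THREADS=4 ./omp_hello
*/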
|
libperf_int.h | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2016. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifndef LIBPERF_INT_H_
#define LIBPERF_INT_H_
#include <tools/perf/api/libperf.h>
BEGIN_C_DECLS
/** @file libperf_int.h */
#include <ucs/async/async.h>
#include <ucs/time/time.h>
#include <ucs/sys/math.h>
#if _OPENMP
#include <omp.h>
#endif
#define TIMING_QUEUE_SIZE 2048
#define UCT_PERF_TEST_AM_ID 5
#define ADDR_BUF_SIZE 2048
#define UCX_PERF_TEST_FOREACH(perf) \
while (!ucx_perf_context_done(perf))
#define rte_call(_perf, _func, ...) \
((_perf)->params.rte->_func((_perf)->params.rte_group, ## __VA_ARGS__))
typedef struct ucx_perf_context ucx_perf_context_t;
typedef struct uct_peer uct_peer_t;
typedef struct ucp_perf_request ucp_perf_request_t;
typedef struct ucx_perf_thread_context ucx_perf_thread_context_t;
struct ucx_perf_allocator {
ucs_memory_type_t mem_type;
ucs_status_t (*init)(ucx_perf_context_t *perf);
ucs_status_t (*ucp_alloc)(const ucx_perf_context_t *perf, size_t length,
void **address_p, ucp_mem_h *memh, int non_blk_flag);
void (*ucp_free)(const ucx_perf_context_t *perf, void *address,
ucp_mem_h memh);
ucs_status_t (*uct_alloc)(const ucx_perf_context_t *perf, size_t length,
unsigned flags, uct_allocated_memory_t *alloc_mem);
void (*uct_free)(const ucx_perf_context_t *perf,
uct_allocated_memory_t *alloc_mem);
void (*memcpy)(void *dst, ucs_memory_type_t dst_mem_type,
const void *src, ucs_memory_type_t src_mem_type,
size_t count);
void* (*memset)(void *dst, int value, size_t count);
};
struct ucx_perf_context {
ucx_perf_params_t params;
/* Buffers */
void *send_buffer;
void *recv_buffer;
/* Measurements */
double start_time_acc; /* accurate start time */
ucs_time_t end_time; /* inaccurate end time (upper bound) */
ucs_time_t prev_time; /* time of previous iteration */
ucs_time_t report_interval; /* interval of showing report */
ucx_perf_counter_t max_iter;
/* Measurements of current/previous **report** */
struct {
ucx_perf_counter_t msgs; /* number of messages */
ucx_perf_counter_t bytes; /* number of bytes */
ucx_perf_counter_t iters; /* number of iterations */
ucs_time_t time; /* inaccurate time (for median and report interval) */
double time_acc; /* accurate time (for avg latency/bw/msgrate) */
} current, prev;
ucs_time_t timing_queue[TIMING_QUEUE_SIZE];
unsigned timing_queue_head;
const ucx_perf_allocator_t *allocator;
union {
struct {
ucs_async_context_t async;
uct_component_h cmpt;
uct_md_h md;
uct_worker_h worker;
uct_iface_h iface;
uct_peer_t *peers;
uct_allocated_memory_t send_mem;
uct_allocated_memory_t recv_mem;
uct_iov_t *iov;
} uct;
struct {
ucp_context_h context;
ucx_perf_thread_context_t* tctx;
ucp_worker_h worker;
ucp_ep_h ep;
ucp_rkey_h rkey;
unsigned long remote_addr;
ucp_mem_h send_memh;
ucp_mem_h recv_memh;
ucp_dt_iov_t *send_iov;
ucp_dt_iov_t *recv_iov;
void *am_hdr;
} ucp;
};
};
struct ucx_perf_thread_context {
pthread_t pt;
int tid;
ucs_status_t status;
ucx_perf_context_t perf;
ucx_perf_result_t result;
};
struct uct_peer {
uct_ep_h ep;
unsigned long remote_addr;
uct_rkey_bundle_t rkey;
};
struct ucp_perf_request {
void *context;
};
typedef struct {
ucs_status_t (*setup)(ucx_perf_context_t *perf);
void (*cleanup)(ucx_perf_context_t *perf);
ucs_status_t (*run)(ucx_perf_context_t *perf);
void (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs_t;
extern ucx_perf_funcs_t ucx_perf_funcs[];
void ucx_perf_test_start_clock(ucx_perf_context_t *perf);
void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index);
void uct_perf_iface_flush_b(ucx_perf_context_t *perf);
ucs_status_t uct_perf_test_dispatch(ucx_perf_context_t *perf);
ucs_status_t ucp_perf_test_dispatch(ucx_perf_context_t *perf);
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result);
void uct_perf_barrier(ucx_perf_context_t *perf);
void ucp_perf_thread_barrier(ucx_perf_context_t *perf);
void ucp_perf_barrier(ucx_perf_context_t *perf);
ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf);
void ucp_perf_test_free_mem(ucx_perf_context_t *perf);
ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf);
void uct_perf_test_free_mem(ucx_perf_context_t *perf);
ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
const ucx_perf_params_t *params);
void ucx_perf_set_warmup(ucx_perf_context_t* perf,
const ucx_perf_params_t* params);
/**
* Get the total length of the message size given by parameters
*/
size_t ucx_perf_get_message_size(const ucx_perf_params_t *params);
static UCS_F_ALWAYS_INLINE int ucx_perf_context_done(ucx_perf_context_t *perf)
{
return ucs_unlikely((perf->current.iters >= perf->max_iter) ||
(perf->current.time > perf->end_time));
}
static inline void ucx_perf_get_time(ucx_perf_context_t *perf)
{
perf->current.time_acc = ucs_get_accurate_time();
}
static inline void ucx_perf_omp_barrier(ucx_perf_context_t *perf)
{
#if _OPENMP
if (perf->params.thread_count > 1) {
#pragma omp barrier
}
#endif
}
static inline void ucx_perf_update(ucx_perf_context_t *perf,
ucx_perf_counter_t iters, size_t bytes)
{
ucx_perf_result_t result;
perf->current.time = ucs_get_time();
perf->current.iters += iters;
perf->current.bytes += bytes;
perf->current.msgs += 1;
perf->timing_queue[perf->timing_queue_head] =
perf->current.time - perf->prev_time;
++perf->timing_queue_head;
if (perf->timing_queue_head == TIMING_QUEUE_SIZE) {
perf->timing_queue_head = 0;
}
perf->prev_time = perf->current.time;
if (perf->current.time - perf->prev.time >= perf->report_interval) {
ucx_perf_get_time(perf);
ucx_perf_calc_result(perf, &result);
rte_call(perf, report, &result, perf->params.report_arg, 0, 0);
perf->prev = perf->current;
}
}
END_C_DECLS
#endif
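/*
 * Usage sketch (not part of the header): the macros and inline helpers
 * above are meant to compose into a measurement loop roughly like the
 * following, where do_one_operation() is a hypothetical stand-in for a
 * real UCT/UCP send:
 *
 *   ucx_perf_test_start_clock(perf);
 *   UCX_PERF_TEST_FOREACH(perf) {
 *       do_one_operation(perf);
 *       ucx_perf_update(perf, 1, ucx_perf_get_message_size(&perf->params));
 *   }
 *
 * ucx_perf_context_done() terminates the loop on max_iter or end_time, and
 * ucx_perf_update() both accounts the iteration and emits the periodic
 * report through rte_call(..., report, ...).
 */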
|
AI_model2_ab.c | #include"AI.h"
#include <omp.h>
#define MAXSTEP 4
//#define CHECK_SCORE
//This is for model2 Alpha-Beta Pruning
//the simulation function for the branches in the searching tree
int ai_model2_AB_simulate(GameState *gameState, Player *player,
int alpha, int beta, int depth)
{
int score_of_state=ai_sum_scores(gameState,player);
if(gameState->playerTurn>0){
alpha=MAX(score_of_state,alpha);
if(beta<=alpha)return score_of_state;
}
else{
beta=MIN(score_of_state,beta);
if(beta<=alpha)return score_of_state;
}
if(depth<=0)return score_of_state;
int MaxScore=-60000;
int playerTurn=gameState->playerTurn;
int total_num_moves=0;
vector MovesStart,MovesEnd;
vector_init(&MovesStart);
vector_init(&MovesEnd);
int cnt=0;
for(int i=0;i<64;i++)
{
vector CurLegalMoves=env_get_legal_moves(gameState,player,i);
cnt=CurLegalMoves.count;
if(cnt>0){
vector_cat(&MovesEnd,&CurLegalMoves);
for(int j=0;j<cnt;j++) vector_add(&MovesStart,i);
}
vector_free(&CurLegalMoves);
total_num_moves+=cnt;
}
assert(MovesStart.count==MovesEnd.count);
int *Scores=malloc(sizeof(int)*total_num_moves);
// #pragma omp parallel for shared(total_num_moves,gameState,player,MovesStart,MovesEnd,depth,Scores,playerTurn)
for(int i=0;i<total_num_moves;i++)
{
GameState simulation=env_copy_State(gameState);
env_play(&simulation,player,vector_get(&MovesStart,i),vector_get(&MovesEnd,i));
int score=playerTurn*ai_model2_AB_simulate(&simulation,player,
alpha,beta,depth-1);
Scores[i]=score;
env_free_state(&simulation);
}
for(int i=0;i<total_num_moves;i++)MaxScore=MAX(MaxScore,Scores[i]);
vector_free(&MovesStart);
vector_free(&MovesEnd);
free(Scores);
return MaxScore*playerTurn;
}
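/* Side note: the Scores[] buffer plus the serial max scan above can also be
   expressed with an OpenMP max reduction (OpenMP 3.1+), avoiding the
   temporary array. A sketch under the same assumptions about the env_*
   helpers:

   int best=-60000;
   #pragma omp parallel for reduction(max:best)
   for(int i=0;i<total_num_moves;i++){
       GameState simulation=env_copy_State(gameState);
       env_play(&simulation,player,vector_get(&MovesStart,i),vector_get(&MovesEnd,i));
       best=MAX(best,playerTurn*ai_model2_AB_simulate(&simulation,player,alpha,beta,depth-1));
       env_free_state(&simulation);
   }
*/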
//the play function for the root in the searching tree, return the quit from check_end
int ai_model2_AB_play(GameState *gameState, Player *player, int maxStep)
{
int check_end=env_check_end(gameState,player);
if(check_end!=0)
{
env_free_container(gameState);
return check_end;
}
int MaxScore=-60000;
// vector MovesStart,MovesEnd,Scores;
// vector_init(&BestMovesID);
// vector_init(&MovesStart);
// vector_init(&MovesEnd);
// vector_init(&Scores);
int container_size=gameState->moves_vector_cnt;
int total_num_moves=0;
int *accu_container_size_arr=malloc(sizeof(int)*gameState->moves_vector_cnt);
for(int i=0;i<container_size;i++){
total_num_moves+=gameState->container[i].legal_moves.count;
if(i==0)accu_container_size_arr[0]=0;
else accu_container_size_arr[i]=accu_container_size_arr[i-1]+gameState->container[i-1].legal_moves.count;
}
int *MovesStart=malloc(sizeof(int)*total_num_moves);
int *MovesEnd=malloc(sizeof(int)*total_num_moves);
int *Scores=malloc(sizeof(int)*total_num_moves);
omp_set_num_threads(16);
for(int i=0;i<container_size;i++)
{
vector CurLegalMoves=gameState->container[i].legal_moves;
int cnt=CurLegalMoves.count;
int pos=gameState->container[i].pos;
for(int j=0;j<cnt;j++){
MovesStart[accu_container_size_arr[i]+j]=pos;
MovesEnd[accu_container_size_arr[i]+j]=vector_get(&CurLegalMoves,j);
}
}
// assert(MovesStart.count==MovesEnd.count);
int playerTurn=gameState->playerTurn;
#pragma omp parallel for shared(gameState,player,MovesStart,MovesEnd,Scores,playerTurn)
for(int i=0;i<total_num_moves;i++)
{
GameState simulation=env_copy_State(gameState);
env_play(&simulation,player,MovesStart[i],MovesEnd[i]);
int score=playerTurn*ai_model2_AB_simulate(&simulation,player,
-60000,60000,maxStep);
Scores[i]=score;
env_free_state(&simulation);
}
int BestMovesCnt=0;
vector BestMovesID;
vector_init(&BestMovesID);
if(stack_check_repeated_move(gameState->moves_stack)){
int MaxScoresArr[6];
for(int i=0;i<6;i++)MaxScoresArr[i]=-60000;
int MinScoreID,MinScoreArrValue;
for(int i=0;i<total_num_moves;i++){
MinScoreArrValue=MaxScoresArr[0];
MinScoreID=0;
for(int j=1;j<6;j++){
if(MaxScoresArr[j]<MinScoreArrValue){
MinScoreArrValue=MaxScoresArr[j];
MinScoreID=j;
}
}
MaxScoresArr[MinScoreID]=MAX(MaxScoresArr[MinScoreID],Scores[i]);
}
for(int i=0;i<total_num_moves;i++){
for(int j=0;j<6;j++){
if(Scores[i]==MaxScoresArr[j]){
vector_add(&BestMovesID,i);
BestMovesCnt++;
}
}
}
}
else{
for(int i=0;i<total_num_moves;i++)MaxScore=MAX(MaxScore,Scores[i]);
for(int i=0;i<total_num_moves;i++){
if(Scores[i]==MaxScore){
vector_add(&BestMovesID,i);
BestMovesCnt++;
}
}
}
int id=vector_get(&BestMovesID,rand()%BestMovesCnt);
#ifdef CHECK_SCORE
printf("It is %d playing\n",gameState->playerTurn);
ai_print_board(gameState);
printf("Current Score is %d\n",ai_sum_scores(gameState,player));
#endif
env_play(gameState,player,MovesStart[id],MovesEnd[id]);
#ifdef CHECK_SCORE
printf("The player has decided to move from %d to %d\n",vector_get(&MovesStart,id),vector_get(&MovesEnd,id));
ai_print_board(gameState);
printf("After making the move, the score is %d\n",ai_sum_scores(gameState,player));
#endif
vector_free(&BestMovesID);
free(MovesStart);
free(MovesEnd);
free(Scores);
env_free_container(gameState);
return 0;
}
|
GB_binop__ne_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int32)
// A*D function (colscale): GB (_AxD__ne_int32)
// D*A function (rowscale): GB (_DxB__ne_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int32)
// C=scalar+B GB (_bind1st__ne_int32)
// C=scalar+B' GB (_bind1st_tran__ne_int32)
// C=A+scalar GB (_bind2nd__ne_int32)
// C=A'+scalar GB (_bind2nd_tran__ne_int32)
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT32 || GxB_NO_NE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ne_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ne_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__ne_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_alloc_null_fb.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>
int main() {
omp_alloctrait_t at[2];
omp_allocator_handle_t a;
void *p[2];
at[0].key = omp_atk_pool_size;
at[0].value = 2 * 1024 * 1024;
at[1].key = omp_atk_fallback;
at[1].value = omp_atv_null_fb;
a = omp_init_allocator(omp_large_cap_mem_space, 2, at);
printf("allocator large created: %p\n", a);
#pragma omp parallel num_threads(2)
{
int i = omp_get_thread_num();
#pragma omp barrier
p[i] = omp_alloc(1024 * 1024, a);
#pragma omp barrier
printf("th %d, ptr %p\n", i, p[i]);
omp_free(p[i], a);
}
// As an allocator has some small memory overhead
// exactly one of the two pointers should be NULL
// because of NULL fallback requested
if ((p[0] == NULL && p[1] != NULL) || (p[0] != NULL && p[1] == NULL)) {
printf("passed\n");
return 0;
} else {
printf("failed: pointers %p %p\n", p[0], p[1]);
return 1;
}
}
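// Trait recap (from the OpenMP spec): omp_atk_pool_size caps the total
// storage the allocator may hand out (2 MB here), and omp_atk_fallback =
// omp_atv_null_fb makes omp_alloc return NULL on failure instead of
// falling back to the default allocator; with the allocator's own
// bookkeeping overhead, two concurrent 1 MB requests cannot both succeed.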
|
Render.h | //
// Render.h
// RayTracer
//
// Created by Aluex on 1/19/15.
// Copyright (c) 2015 Aluex. All rights reserved.
//
#ifndef __RayTracer__Render__
#define __RayTracer__Render__
#include <stdio.h>
#include <cassert>
#include "Frame.h"
#include "Scene.h"
#include "Defaults.h"
#include "Utils.h"
#include <vector>
#include <cmath>
#include <queue>
#include <opencv2/opencv.hpp>
class Render
{
cv::Mat buffer;
int height;
int width;
public:
double invGamma;
int hn, wn;
int MAX_DEPTH;
double secondRayControl;
int extraReflectionTest;
Frame *frame;
Scene *scene;
Render(cv::Mat m, int height, int width, int hn, int wn, double invGamma,
       int MAX_DEPTH, double secondRayControl, int extraReflectionTest)
    : buffer(m), height(height), width(width), hn(hn), wn(wn),
      invGamma(invGamma), MAX_DEPTH(MAX_DEPTH),
      secondRayControl(secondRayControl), extraReflectionTest(extraReflectionTest)
{
//////
}
bool rayTrace(Ray &ray, double & tRes, ShadeRec & resSr)
{
double tMin = HUGE_double, t;
bool notFound = true;
ShadeRec tmpSr(resSr.scene);
tmpSr.color = BLACK;
//ray.direction.Normalize();
std::vector<Geometry*>::iterator it = scene->objList.begin();
for(;it!=scene->objList.end();++it)
if((*it)->hit(ray, t, tmpSr) && t<tMin && t > 0){
tMin = t;
notFound = false;
resSr = tmpSr;
//resSr.hitObject = *it; // Set by the hit function.
//resSr.color = (*it)->getColor(tmpSr.hitPoint);
//resSr.color = tmpSr.hitObject->getColor(tmpSr.hitPoint);
}
if(notFound)return false;
tRes = tMin;
return true;
}
RGBColor gammaAdjust(RGBColor &color)
{
RGBColor resColor(color);
resColor[0] = min(int(pow(resColor[0], invGamma)), 255);
resColor[1] = min(int(pow(resColor[1], invGamma)), 255);
resColor[2] = min(int(pow(resColor[2], invGamma)), 255);
return resColor;
}
void renderPixel(int i, int j)
{
cv::Vec3b & color = buffer.at<cv::Vec3b>(i,j);
Ray ray; double tMin;
Vector colorFinal(0,0,0);
for(int iSample=0; iSample<hn; ++iSample)
for(int jSample=0; jSample<wn; ++jSample)
{
frame->getDirectionWithSampler(i, j, ray);
//frame->getDirectionWithSamplerWithDOF(i, j, ray);
////////////
std::vector<restrictRay> rayQueue;
rayQueue.push_back(restrictRay(ray, 1.0, 1.0, 0));
RGBColor colorPhong(0,0,0);
do{
restrictRay currentRay = rayQueue.back();
rayQueue.pop_back();
ShadeRec tmpSr(scene), newSr(scene);
Ray &ray = std::get<0>(currentRay);
double currentFactor = std::get<1>(currentRay), N = std::get<2>(currentRay);
int depth = std::get<3>(currentRay);
if(rayTrace(ray, tMin, tmpSr))
{
tmpSr.normal.Normalize();
Vector V = ray.direction;
if(tmpSr.hitObject->bothSide && tmpSr.normal * V > 0)
tmpSr.normal = - tmpSr.normal;
V.Normalize();
Material *currentMaterial = tmpSr.hitObject->material;
Vector tmpColor(tmpSr.color.prodEWise(currentMaterial->ambientFact));
for(std::vector<Light*>::iterator it=
scene->lightList.begin(); it!=scene->lightList.end(); ++it)
{
//Vector L = (*it)->position - tmpSr.hitPoint;
Vector L = (*it)->getPosition() - tmpSr.hitPoint;
Ray reflectRay(tmpSr.hitPoint, L);
if(reflectRay.direction * tmpSr.normal > 0)
{
bool cross = rayTrace(reflectRay, tMin, newSr);
if(cross && tMin < 1.0) continue; // The light is blocked
L.Normalize();
Vector R = tmpSr.normal * (2 * (L * tmpSr.normal)) - L;
double specularBase = -R * V;
if(specularBase<=0)
tmpColor += tmpSr.color.prodEWise(currentMaterial->diffuseFact) * (*it)->intensity * (L* tmpSr.normal);
else
tmpColor += tmpSr.color.prodEWise(currentMaterial->diffuseFact) * (*it)->intensity * (L* tmpSr.normal)
+ currentMaterial->specularFact.prodEWise((*it)->color) * (*it)->intensity*pow(-R * V, currentMaterial->specularPower);
}
}
colorPhong += tmpColor * currentFactor;
if(depth < MAX_DEPTH)
{
double nextRefractionFactor = currentFactor * currentMaterial->refractionFact;
if(tmpSr.hitObject->bothSide && tmpSr.normal * V > 0)
tmpSr.normal = -tmpSr.normal; // Both side can reflect or refract;
Vector reflectDirection = (tmpSr.normal * (2 * (-V * tmpSr.normal)) + V);
double nextReflectionFactor = currentFactor * currentMaterial->reflectionFact;
if(nextReflectionFactor > DEFAULT_EPSILON)
{
Ray reflectRay(tmpSr.hitPoint, reflectDirection * secondRayControl);
rayQueue.push_back(restrictRay(reflectRay, nextReflectionFactor, N, depth+1));
}
if(nextRefractionFactor > DEFAULT_EPSILON)
{
double matN = 1.0/currentMaterial->N, cosI = - tmpSr.normal * V;
double cosJSQ = 1 - matN*matN *(1 - cosI * cosI);
if(cosJSQ>0) // Otherwise complete reflection
{
Vector T = tmpSr.normal * (matN * cosI - sqrt(cosJSQ)) + V * matN;
rayQueue.push_back(restrictRay(Ray(tmpSr.hitPoint, T * secondRayControl), nextRefractionFactor, N * matN, depth+1));
}
}
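/* The refraction branch above is the vector form of Snell's law: with
   eta = n1/n2 (here matN = 1/N for a ray entering the material), incident
   direction V and surface normal n, cosI = -n.V, and the transmitted
   direction is T = eta*V + (eta*cosI - sqrt(1 - eta^2*(1 - cosI^2)))*n;
   a negative radicand (cosJSQ <= 0) means total internal reflection,
   hence the cosJSQ > 0 guard. */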
for(int extraReflectionCounter = 0; extraReflectionCounter < extraReflectionTest; ++ extraReflectionCounter)
{
double changeX = reflectDirection[0] *(getRandomDouble() + 0.5), changeY = reflectDirection[1] * (getRandomDouble() + 0.5), changeZ = reflectDirection[2] * (getRandomDouble() + 0.5);
Vector extraReflectRay(changeX, changeY, changeZ);
if(extraReflectRay * tmpSr.normal < 0)
continue;
extraReflectRay.Normalize();
double extraReflectionFactor = nextReflectionFactor * pow(reflectDirection * extraReflectRay, currentMaterial->glossyFact);
assert(!std::isnan(extraReflectionFactor));
if(extraReflectionFactor > DEFAULT_EPSILON)
rayQueue.push_back(restrictRay(
Ray(tmpSr.hitPoint, extraReflectRay),
extraReflectionFactor, N, depth+1));
/////
}
}
}
}while(!rayQueue.empty());
colorFinal += colorPhong.sigmoid();
}
colorFinal /= hn * wn;
color = cv::Vec3b((uchar)colorFinal[2], (uchar)colorFinal[1], (uchar)colorFinal[0]);
}
void doRender()
{
assert(frame!=NULL && scene!=NULL);
int count = 0, step = int(height * width * 0.01);
if(step == 0) step = 1; // guard the progress modulo for very small images
//renderPixel(394, 210);
#pragma omp parallel for schedule(dynamic)
for(int i=0; i<height; ++i)
for(int j=0; j<width; ++j)
{
renderPixel(i,j);
#pragma omp atomic
++count;
if(count%step==0)
printf("%d percent\n", count / step);
}
}
};
#endif /* defined(__RayTracer__Render__) */
|
declare_simd_ast_print.c | // RUN: %clang_cc1 -verify -fopenmp -ast-print %s | FileCheck %s
// RUN: %clang_cc1 -fopenmp -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
#pragma omp declare simd aligned(b : 64)
#pragma omp declare simd simdlen(32) aligned(d, b)
#pragma omp declare simd inbranch, uniform(d) linear(val(s1, s2) : 32)
#pragma omp declare simd notinbranch simdlen(2), uniform(s1, s2) linear(d: s1)
void add_1(float *d, int s1, float *s2, double b[]) __attribute__((cold));
// CHECK: #pragma omp declare simd notinbranch simdlen(2) uniform(s1, s2) linear(val(d): s1)
// CHECK-NEXT: #pragma omp declare simd inbranch uniform(d) linear(val(s1): 32) linear(val(s2): 32)
// CHECK-NEXT: #pragma omp declare simd simdlen(32) aligned(d) aligned(b)
// CHECK-NEXT: #pragma omp declare simd aligned(b: 64)
// CHECK-NEXT: void add_1(float *d, int s1, float *s2, double b[]) __attribute__((cold))
#endif
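// Caller-side sketch (hypothetical, not part of this test): given the
// declare simd variants above, a loop such as
//   #pragma omp simd
//   for (int i = 0; i < n; i++)
//     add_1(&d[i], s1, &s2[i], b);
// may be vectorized by invoking one of the generated SIMD clones of add_1
// instead of the scalar entry point.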
|
SparseGaussianProcess.h | /*
* Copyright 2015 Christoph Jud (christoph.jud@unibas.ch)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#pragma once
#include <limits>
#include "Kernel.h"
#include "GaussianProcess.h"
namespace gpr{
template <class TScalarType> class SparseLikelihood;
template< class TScalarType >
class SparseGaussianProcess : public GaussianProcess<TScalarType>{
public:
typedef SparseGaussianProcess Self;
typedef std::shared_ptr<Self> Pointer;
typedef GaussianProcess<TScalarType> Superclass;
typedef typename Superclass::VectorType VectorType;
typedef typename Superclass::MatrixType MatrixType;
typedef typename Superclass::DiagMatrixType DiagMatrixType;
typedef typename Superclass::VectorListType VectorListType;
typedef typename Superclass::MatrixListType MatrixListType;
typedef typename Superclass::KernelType KernelType;
typedef typename Superclass::KernelTypePointer KernelTypePointer;
// Constructors
SparseGaussianProcess(KernelTypePointer kernel) : Superclass(kernel),
m_Jitter(0),
m_Initialized(false){}
SparseGaussianProcess(KernelTypePointer kernel, TScalarType jitter) : Superclass(kernel),
m_Jitter(jitter),
m_Initialized(false){}
// Destructor
virtual ~SparseGaussianProcess(){}
/*
* Add a new inducing sample label pair to the sparse Gaussian process
* x is the input vector
* y the corresponding label vector
*/
void AddInducingSample(const VectorType& x, const VectorType& y){
if(m_InducingSampleVectors.size() == 0){ // first call of AddSample defines dimensionality of input space
this->m_InputDimension = x.size();
}
if(m_InducingLabelVectors.size() == 0){ // first call of AddSample defines dimensionality of output space
this->m_OutputDimension = y.size();
}
this->CheckInputDimension(x, "SparseGaussianProcess::AddInducingSample: ");
this->CheckOutputDimension(y, "SparseGaussianProcess::AddInducingSample: ");
m_InducingSampleVectors.push_back(x);
m_InducingLabelVectors.push_back(y);
m_Initialized = false;
}
/*
* Removes all inducing sample label pairs from the sparse Gaussian process
*/
void ClearInducingSamples(){
m_InducingSampleVectors.clear();
m_InducingLabelVectors.clear();
m_Initialized = false;
}
VectorType Predict(const VectorType &x){
Initialize();
this->CheckInputDimension(x, "GaussianProcess::Predict: ");
VectorType Kx;
ComputeKernelVector(x, Kx);
return (Kx.adjoint() * m_RegressionVectors).adjoint();
}
TScalarType operator()(const VectorType & x, const VectorType & y){
Initialize();
this->CheckInputDimension(x, "SparseGaussianProcess::(): ");
this->CheckInputDimension(y, "SparseGaussianProcess::(): ");
VectorType Kx;
ComputeKernelVector(x, Kx);
VectorType Ky;
ComputeKernelVector(y, Ky);
return (*this->m_Kernel)(x, y) -
Kx.adjoint() * m_IndusingInvertedKernelMatrix * Ky +
Kx.adjoint() * m_RegressionMatrix * Ky;
}
unsigned GetNumberOfInducingSamples() const{
return m_InducingSampleVectors.size();
}
TScalarType GetJitter() const{
return m_Jitter;
}
void SetJitter(TScalarType jitter){
m_Jitter = jitter;
m_Initialized = false;
}
virtual void Initialize(){
if(m_Initialized){
return;
}
if(!(m_InducingSampleVectors.size() > 0)){
throw std::string("SparseGaussianProcess::Initialize: no inducing samples defined during initialization");
}
if(!(m_InducingLabelVectors.size() > 0)){
throw std::string("SparseGaussianProcess::Initialize: no inducing labels defined during initialization");
}
if(!(this->m_SampleVectors.size() > 0)){
throw std::string("SparseGaussianProcess::Initialize: no dense samples defined during initialization");
}
if(!(this->m_LabelVectors.size() > 0)){
throw std::string("SparseGaussianProcess::Initialize: no dense labels defined during initialization");
}
PreComputeRegression();
m_Initialized = true;
}
// this method is public for using it in testing
virtual void ComputeDenseKernelMatrix(MatrixType &M) const{
if(this->debug){
std::cout << "SparseGaussianProcess::ComputeDenseKernelMatrix: building kernel matrix... ";
std::cout.flush();
}
Superclass::ComputeKernelMatrixInternal(M, this->m_SampleVectors);
if(this->debug) std::cout << "[done]" << std::endl;
}
protected:
/*
* Computation of inducing kernel matrix K_ij = k(x_i, x_j)
* - it is symmetric therefore only half of the kernel evaluations
* has to be performed
*
* (The actual computation is performed in ComputeKernelMatrixInternal)
*/
virtual void ComputeKernelMatrix(MatrixType &M) const{
if(this->debug){
std::cout << "SparseGaussianProcess::ComputeKernelMatrix: building kernel matrix... ";
std::cout.flush();
}
Superclass::ComputeKernelMatrixInternal(M, m_InducingSampleVectors);
if(this->debug) std::cout << "[done]" << std::endl;
}
virtual void ComputeKernelMatrixWithJitter(MatrixType &M) const{
ComputeKernelMatrix(M);
// add jitter to diagonal
for(unsigned i=0; i<M.rows(); i++){
M(i,i) += m_Jitter;
}
}
/*
* Bring the label vectors in a matrix form Y,
* where the rows are the labels.
*
* (it is actually performed in ComputeLabelMatrixInternal)
*/
virtual void ComputeLabelMatrix(MatrixType &Y) const{
Superclass::ComputeLabelMatrixInternal(Y, m_InducingLabelVectors);
}
/*
* Bring the dense label vectors in a matrix form Y
*
* (calls the superclass method)
*/
virtual void ComputeDenseLabelMatrix(MatrixType &Y) const{
Superclass::ComputeLabelMatrix(Y);
}
/*
* Computation of the kernel vector V_i = k(x, x_i)
*
* (calls ComputeKernelVectorInternal)
*/
virtual void ComputeKernelVector(const VectorType &x, VectorType &Kx) const{
Superclass::ComputeKernelVectorInternal(x, Kx, m_InducingSampleVectors);
}
/*
* Computation of the cross-covariance matrix Knm(i,j) = k(y_i, x_j),
* where the x_j are the inducing samples and the y_i the dense samples
*
* - Knm = [Kx1 Kx2 ... Kxm] in R^(n x m)
*
* (computed directly with an OpenMP-parallel loop)
*/
virtual void ComputeKernelVectorMatrix(MatrixType &Knm) const{
unsigned n = this->m_SampleVectors.size();
unsigned m = m_InducingSampleVectors.size();
if(!(m<=n)){
throw std::string("SparseGaussianProcess::ComputeKernelVectorMatrix: the number of inducing samples must not exceed the number of dense samples");
}
Knm.resize(n, m);
#pragma omp parallel for
for(unsigned i=0; i<n; i++){
for(unsigned j=0; j<m; j++){
Knm(i, j) = (*this->m_Kernel)(this->m_SampleVectors[i], m_InducingSampleVectors[j]);
}
}
}
virtual void ComputeDerivativeKernelVectorMatrix(MatrixType &M)const{
unsigned num_params = this->m_Kernel->GetNumberOfParameters();
unsigned n = this->m_SampleVectors.size();
unsigned m = m_InducingSampleVectors.size();
if(!(m<=n)){
throw std::string("SparseGaussianProcess::ComputeDerivativeKernelVectorMatrix: the number of inducing samples must not exceed the number of dense samples");
}
M.resize(n*num_params,m);
#pragma omp parallel for
for(unsigned i=0; i<n; i++){
for(unsigned j=0; j<m; j++){
typename GaussianProcess<TScalarType>::VectorType v;
v = this->m_Kernel->GetDerivative(this->m_SampleVectors[i], m_InducingSampleVectors[j]);
if(v.rows() != num_params) throw std::string("SparseGaussianProcess::ComputeDerivativeKernelVectorMatrix: dimension mismatch in derivative.");
for(unsigned p=0; p<num_params; p++){
//if(i+p*n >= M.rows() || j+p*n >= M.rows()) throw std::string("GaussianProcess::ComputeDerivativeKernelMatrix: dimension missmatch in derivative.");
M(i + p*n, j) = v[p];
//M(j + p*n, i) = v[p];
}
}
}
}
/*
* Learning is performed.
*
* Mean:
* Kxm * inv(Kmm) * mu, with mu = sigma^{-2} Kmm * Sigma * Kmn * Y
*
*/
virtual void PreComputeRegression(){
// Computation of kernel matrix
if(this->debug){
std::cout << "SparseGaussianProcess::PreComputeRegression: calculating regression vectors and regression matrix... " << std::endl;
}
bool stable = m_Jitter < std::numeric_limits<TScalarType>::min();
MatrixType K;
ComputeKernelMatrixWithJitter(K);
// inverting inducing kernel matrix
m_IndusingInvertedKernelMatrix = this->InvertKernelMatrix(K, this->m_InvMethod, stable);
// computing kernel vector matrix between inducing points and dense points
MatrixType Kmn;
ComputeKernelVectorMatrix(Kmn);
// Compute the label matrix
// TODO: if mean support is implemented, the mean has to be subtracted from the labels!
MatrixType Y;
ComputeDenseLabelMatrix(Y);
// computation of Sigma matrix
TScalarType inverse_sigma2 = 1.0/(this->m_Sigma*this->m_Sigma);
MatrixType S = K + inverse_sigma2*Kmn.adjoint()*Kmn;
m_SigmaMatrix = this->InvertKernelMatrix(S, this->m_InvMethod, stable);
// regression vectors for computing mean
m_RegressionVectors = m_IndusingInvertedKernelMatrix * (inverse_sigma2*K*m_SigmaMatrix*Kmn.adjoint()*Y);
// regression matrix for computing variance
m_RegressionMatrix = m_IndusingInvertedKernelMatrix * (K*m_SigmaMatrix*K) * m_IndusingInvertedKernelMatrix;
// core matrix, used for likelihoods
MatrixType C;
ComputeCoreMatrix(C, m_IndusingInvertedKernelMatrix, Kmn);
m_CoreMatrix = C;
}
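/*
 * For reference, a sketch of the underlying math in the standard
 * projected-process formulation (m inducing points, n dense points,
 * K = K_mm the inducing kernel matrix, cross kernel K_mn, noise sigma^2):
 *
 *   Sigma  = (K_mm + sigma^{-2} K_mn K_nm)^{-1}
 *   m(x)   = sigma^{-2} k_m(x)^T Sigma K_mn y
 *   v(x,y) = k(x,y) - k_m(x)^T inv(K_mm) k_m(y) + k_m(x)^T Sigma k_m(y)
 *
 * PreComputeRegression() assembles exactly these pieces: m_RegressionVectors
 * reduces to sigma^{-2} Sigma K_mn Y (the explicit inv(Kmm)*Kmm pair
 * cancels), and m_RegressionMatrix reduces to Sigma.
 */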
/*
* Computation of the following:
* - Inducing kernel matrix K
* - Inducing inverted kernel matrix inv(K)
* - Cross kernel matrix Kmn
* - Identity (noise) matrix I_sigma
*/
virtual void ComputeCoreMatrices(MatrixType &K, MatrixType &K_inv, MatrixType &Kmn, DiagMatrixType &I_sigma){
bool stable = m_Jitter < std::numeric_limits<TScalarType>::min();
if(this->debug) std::cout << "SparseGaussianProcess::ComputeCoreMatrices: compute kernel matrix..." << std::flush;
ComputeKernelMatrixWithJitter(K);
if(this->debug) std::cout << " [done]" << std::endl;
if(this->debug) std::cout << "SparseGaussianProcess::ComputeCoreMatrices: invert kernel matrix..." << std::flush;
K_inv = this->InvertKernelMatrix(K, this->m_InvMethod, stable);
if(this->debug) std::cout << " [done]" << std::endl;
if(this->debug) std::cout << "SparseGaussianProcess::ComputeCoreMatrices: compute link kernel matrix..." << std::flush;
ComputeKernelVectorMatrix(Kmn);
if(this->debug) std::cout << " [done]" << std::endl;
if(this->GetSigma()<=0){
throw std::string("SparseGaussianProcess::ComputeCoreMatrices: sigma must be positive.");
}
if(Kmn.rows() == 0){
throw std::string("SparseGaussianProcess::ComputeCoreMatrices: empty sample set.");
}
if(this->debug) std::cout << "SparseGaussianProcess::ComputeCoreMatrices: compute additional noise..." << std::flush;
I_sigma.resize(Kmn.rows());
I_sigma.setIdentity();
I_sigma = (I_sigma.diagonal().array() * this->GetSigmaSquared()).matrix().asDiagonal();
if(this->debug) std::cout << " [done]" << std::endl;
}
/*
* Computation of core matrix: Kmn * inv(Kmm) * Knm
*/
virtual void ComputeCoreMatrix(MatrixType &C, MatrixType &K_inv) const{
bool stable = m_Jitter < std::numeric_limits<TScalarType>::min();
MatrixType K;
ComputeKernelMatrixWithJitter(K);
MatrixType Knm;
ComputeKernelVectorMatrix(Knm);
if(this->debug) std::cout << "C " << Knm.rows() << " x " << Knm.cols() << std::endl;
K_inv = this->InvertKernelMatrix(K, this->m_InvMethod, stable);
ComputeCoreMatrix(C, K_inv, Knm);
}
// additional interfaces for ComputeCoreMatrix
virtual void ComputeCoreMatrix(MatrixType &C) const{
MatrixType K_inv;
ComputeCoreMatrix(C, K_inv);
}
virtual void ComputeCoreMatrix(MatrixType &C, const MatrixType& K_inv, const MatrixType& Knm) const{
C = Knm * K_inv * Knm.adjoint();
}
/*
* Computation of the derivative inducing kernel matrix D_i = delta Kmm / delta params_i
* - returns a matrix: [D_0
* .
* D_i
* .
* D_l-1]
* for l = number of params and D_i in mxm, m = number of inducing samples
*/
virtual void ComputeDerivativeKernelMatrix(MatrixType &M) const{
if(this->debug){
std::cout << "SparseGaussianProcess::ComputeDerivativeKernelMatrix: building kernel matrix... ";
std::cout.flush();
}
Superclass::ComputeDerivativeKernelMatrixInternal(M, m_InducingSampleVectors);
if(this->debug) std::cout << "[done]" << std::endl;
}
TScalarType m_Jitter; // noise on inducing kernel matrix
bool m_Initialized;
VectorListType m_InducingSampleVectors; // Dimensionality: TInputDimension
VectorListType m_InducingLabelVectors; // Dimensionality: TOutputDimension
MatrixType m_RegressionVectors; // mu of m(x)
MatrixType m_SigmaMatrix;
MatrixType m_IndusingInvertedKernelMatrix;
MatrixType m_RegressionMatrix;
MatrixType m_CoreMatrix; // Knm * inv(Kmm) * Kmn
private:
SparseGaussianProcess(const Self &); //purposely not implemented
void operator=(const Self &); //purposely not implemented
friend class SparseLikelihood<TScalarType>;
};
} // namespace gpr
#include "SparseLikelihood.h"
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 2, Ny = 2, Nz = 2, Nt = 0; /* safe defaults so the sizes are never used uninitialized */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 32;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32));t3<=min(min(min(floord(4*t2+Ny,32),floord(Nt+Ny-4,32)),floord(2*t1+Ny+1,32)),floord(4*t1-4*t2+Nz+Ny-1,32));t3++) {
for (t4=max(max(max(0,ceild(t1-511,512)),ceild(4*t2-Nz-1020,1024)),ceild(32*t3-Ny-1020,1024));t4<=min(min(min(min(floord(4*t2+Nx,1024),floord(Nt+Nx-4,1024)),floord(2*t1+Nx+1,1024)),floord(32*t3+Nx+28,1024)),floord(4*t1-4*t2+Nz+Nx-1,1024));t4++) {
for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),32*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),32*t3+30),1024*t4+1022),4*t1-4*t2+Nz+1);t5++) {
for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(1024*t4,t5+1);
ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[(t5 + 1) % 2][-t5 + t6][-t5 + t7][-t5 + t8] =
coef[0][-t5 + t6][-t5 + t7][-t5 + t8] * A[t5 % 2][-t5 + t6][-t5 + t7][-t5 + t8]
+ coef[1][-t5 + t6][-t5 + t7][-t5 + t8] * A[t5 % 2][-t5 + t6 - 1][-t5 + t7][-t5 + t8]
+ coef[2][-t5 + t6][-t5 + t7][-t5 + t8] * A[t5 % 2][-t5 + t6][-t5 + t7 - 1][-t5 + t8]
+ coef[3][-t5 + t6][-t5 + t7][-t5 + t8] * A[t5 % 2][-t5 + t6][-t5 + t7][-t5 + t8 - 1]
+ coef[4][-t5 + t6][-t5 + t7][-t5 + t8] * A[t5 % 2][-t5 + t6 + 1][-t5 + t7][-t5 + t8]
+ coef[5][-t5 + t6][-t5 + t7][-t5 + t8] * A[t5 % 2][-t5 + t6][-t5 + t7 + 1][-t5 + t8]
+ coef[6][-t5 + t6][-t5 + t7][-t5 + t8] * A[t5 % 2][-t5 + t6][-t5 + t7][-t5 + t8 + 1];
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
free(A);
free(coef);
return 0;
}
|
pr54017.c | /* PR middle-end/54017 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
void
f1 (void)
{
#pragma omp parallel sections
{
#pragma omp section
{
for (;;)
;
}
}
}
int
f2 (void)
{
int i = 0;
#pragma omp parallel
#pragma omp sections reduction(+:i)
{
#pragma omp section
{
for (;;)
;
}
}
return i;
}
void
f3 (void)
{
#pragma omp parallel sections
{
#pragma omp section
{
for (;;)
;
}
#pragma omp section
;
}
}
int
f4 (void)
{
int i = 0;
#pragma omp parallel
#pragma omp sections reduction(+:i)
{
#pragma omp section
{
for (;;)
;
}
#pragma omp section
;
}
return i;
}
|
maxpool_layer.c | #include "maxpool_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "gemm.h"
#include <stdio.h>
#include <float.h> // for FLT_MAX used in forward_maxpool_layer
image get_maxpool_image(maxpool_layer l)
{
int h = l.out_h;
int w = l.out_w;
int c = l.c;
return float_to_image(w,h,c,l.output);
}
image get_maxpool_delta(maxpool_layer l)
{
int h = l.out_h;
int w = l.out_w;
int c = l.c;
return float_to_image(w,h,c,l.delta);
}
void create_maxpool_cudnn_tensors(layer *l)
{
#ifdef CUDNN
CHECK_CUDNN(cudnnCreatePoolingDescriptor(&l->poolingDesc));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&l->srcTensorDesc));
CHECK_CUDNN(cudnnCreateTensorDescriptor(&l->dstTensorDesc));
#endif // CUDNN
}
void cudnn_maxpool_setup(layer *l)
{
#ifdef CUDNN
CHECK_CUDNN(cudnnSetPooling2dDescriptor(
l->poolingDesc,
CUDNN_POOLING_MAX,
CUDNN_NOT_PROPAGATE_NAN, // CUDNN_PROPAGATE_NAN, CUDNN_NOT_PROPAGATE_NAN
l->size,
l->size,
l->pad/2, //0, //l.pad,
l->pad/2, //0, //l.pad,
l->stride_x,
l->stride_y));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w));
CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w));
#endif // CUDNN
}
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride_x, int stride_y, int padding, int maxpool_depth, int out_channels, int antialiasing, int train)
{
maxpool_layer l = { (LAYER_TYPE)0 };
l.type = MAXPOOL;
l.train = train;
const int blur_stride_x = stride_x;
const int blur_stride_y = stride_y;
l.antialiasing = antialiasing;
if (antialiasing) {
stride_x = stride_y = l.stride = l.stride_x = l.stride_y = 1; // use stride=1 in host-layer
}
l.batch = batch;
l.h = h;
l.w = w;
l.c = c;
l.pad = padding;
l.maxpool_depth = maxpool_depth;
l.out_channels = out_channels;
if (maxpool_depth) {
l.out_c = out_channels;
l.out_w = l.w;
l.out_h = l.h;
}
else {
l.out_w = (w + padding - size) / stride_x + 1;
l.out_h = (h + padding - size) / stride_y + 1;
l.out_c = c;
}
l.outputs = l.out_h * l.out_w * l.out_c;
l.inputs = h*w*c;
l.size = size;
l.stride = stride_x;
l.stride_x = stride_x;
l.stride_y = stride_y;
int output_size = l.out_h * l.out_w * l.out_c * batch;
if (train) {
l.indexes = (int*)calloc(output_size, sizeof(int));
l.delta = (float*)calloc(output_size, sizeof(float));
}
l.output = (float*)calloc(output_size, sizeof(float));
l.forward = forward_maxpool_layer;
l.backward = backward_maxpool_layer;
#ifdef GPU
l.forward_gpu = forward_maxpool_layer_gpu;
l.backward_gpu = backward_maxpool_layer_gpu;
if (train) {
l.indexes_gpu = cuda_make_int_array(output_size);
l.delta_gpu = cuda_make_array(l.delta, output_size);
}
l.output_gpu = cuda_make_array(l.output, output_size);
create_maxpool_cudnn_tensors(&l);
cudnn_maxpool_setup(&l);
#endif // GPU
l.bflops = (l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.;
if (maxpool_depth)
fprintf(stderr, "max-depth %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
else if(stride_x == stride_y)
fprintf(stderr, "max %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
else
fprintf(stderr, "max %2dx%2d/%2dx%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, stride_y, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
if (l.antialiasing) {
printf("AA: ");
l.input_layer = (layer*)calloc(1, sizeof(layer));
int blur_size = 3;
int blur_pad = blur_size / 2;
if (l.antialiasing == 2) {
blur_size = 2;
blur_pad = 0;
}
*(l.input_layer) = make_convolutional_layer(batch, 1, l.out_h, l.out_w, l.out_c, l.out_c, l.out_c, blur_size, blur_stride_x, blur_stride_y, 1, blur_pad, LINEAR, 0, 0, 0, 0, 0, 1, 0, NULL, 0, train);
const int blur_nweights = l.out_c * blur_size * blur_size; // (n / n) * n * blur_size * blur_size;
int i;
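// blur_size == 2 builds a 2x2 box filter (all weights 1/4);
// blur_size == 3 builds the 3x3 binomial kernel [1 2 1; 2 4 2; 1 2 1] / 16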
if (blur_size == 2) {
for (i = 0; i < blur_nweights; i += (blur_size*blur_size)) {
l.input_layer->weights[i + 0] = 1 / 4.f;
l.input_layer->weights[i + 1] = 1 / 4.f;
l.input_layer->weights[i + 2] = 1 / 4.f;
l.input_layer->weights[i + 3] = 1 / 4.f;
}
}
else {
for (i = 0; i < blur_nweights; i += (blur_size*blur_size)) {
l.input_layer->weights[i + 0] = 1 / 16.f;
l.input_layer->weights[i + 1] = 2 / 16.f;
l.input_layer->weights[i + 2] = 1 / 16.f;
l.input_layer->weights[i + 3] = 2 / 16.f;
l.input_layer->weights[i + 4] = 4 / 16.f;
l.input_layer->weights[i + 5] = 2 / 16.f;
l.input_layer->weights[i + 6] = 1 / 16.f;
l.input_layer->weights[i + 7] = 2 / 16.f;
l.input_layer->weights[i + 8] = 1 / 16.f;
}
}
for (i = 0; i < l.out_c; ++i) l.input_layer->biases[i] = 0;
#ifdef GPU
if (gpu_index >= 0) {
l.input_antialiasing_gpu = cuda_make_array(NULL, l.batch*l.outputs);
push_convolutional_layer(*(l.input_layer));
}
#endif // GPU
}
return l;
}
void resize_maxpool_layer(maxpool_layer *l, int w, int h)
{
l->h = h;
l->w = w;
l->inputs = h*w*l->c;
l->out_w = (w + l->pad - l->size) / l->stride_x + 1;
l->out_h = (h + l->pad - l->size) / l->stride_y + 1;
l->outputs = l->out_w * l->out_h * l->out_c;
int output_size = l->outputs * l->batch;
if (l->train) {
l->indexes = (int*)realloc(l->indexes, output_size * sizeof(int));
l->delta = (float*)realloc(l->delta, output_size * sizeof(float));
}
l->output = (float*)realloc(l->output, output_size * sizeof(float));
#ifdef GPU
CHECK_CUDA(cudaFree(l->output_gpu));
l->output_gpu = cuda_make_array(l->output, output_size);
if (l->train) {
CHECK_CUDA(cudaFree((float *)l->indexes_gpu));
CHECK_CUDA(cudaFree(l->delta_gpu));
l->indexes_gpu = cuda_make_int_array(output_size);
l->delta_gpu = cuda_make_array(l->delta, output_size);
}
cudnn_maxpool_setup(l);
#endif
}
void forward_maxpool_layer(const maxpool_layer l, network_state state)
{
if (l.maxpool_depth)
{
int b, i, j, k, g;
for (b = 0; b < l.batch; ++b) {
#pragma omp parallel for private(j, k, g) // j, k, g are declared outside the loop; privatize them to avoid a data race
for (i = 0; i < l.h; ++i) {
for (j = 0; j < l.w; ++j) {
for (g = 0; g < l.out_c; ++g)
{
int out_index = j + l.w*(i + l.h*(g + l.out_c*b));
float max = -FLT_MAX;
int max_i = -1;
for (k = g; k < l.c; k += l.out_c)
{
int in_index = j + l.w*(i + l.h*(k + l.c*b));
float val = state.input[in_index];
max_i = (val > max) ? in_index : max_i;
max = (val > max) ? val : max;
}
l.output[out_index] = max;
if (l.indexes) l.indexes[out_index] = max_i;
}
}
}
}
return;
}
if (!state.train && l.stride_x == l.stride_y) {
forward_maxpool_layer_avx(state.input, l.output, l.indexes, l.size, l.w, l.h, l.out_w, l.out_h, l.c, l.pad, l.stride, l.batch);
}
else {
int b, i, j, k, m, n;
int w_offset = -l.pad / 2;
int h_offset = -l.pad / 2;
int h = l.out_h;
int w = l.out_w;
int c = l.c;
for (b = 0; b < l.batch; ++b) {
for (k = 0; k < c; ++k) {
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) {
int out_index = j + w*(i + h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
for (n = 0; n < l.size; ++n) {
for (m = 0; m < l.size; ++m) {
int cur_h = h_offset + i*l.stride_y + n;
int cur_w = w_offset + j*l.stride_x + m;
int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
int valid = (cur_h >= 0 && cur_h < l.h &&
cur_w >= 0 && cur_w < l.w);
float val = (valid != 0) ? state.input[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
l.output[out_index] = max;
if (l.indexes) l.indexes[out_index] = max_i;
}
}
}
}
}
if (l.antialiasing) {
network_state s = { 0 };
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
s.input = l.output;
forward_convolutional_layer(*(l.input_layer), s);
//simple_copy_ongpu(l.outputs*l.batch, l.output, l.input_antialiasing);
memcpy(l.output, l.input_layer->output, l.input_layer->outputs * l.input_layer->batch * sizeof(float));
}
}
void backward_maxpool_layer(const maxpool_layer l, network_state state)
{
int i;
int h = l.out_h;
int w = l.out_w;
int c = l.out_c;
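// Note: overlapping pooling windows (stride < size) can make two outputs
// share the same max index, so the += below may collide across threads;
// a strictly safe version would use "#pragma omp atomic" on the update.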
#pragma omp parallel for
for(i = 0; i < h*w*c*l.batch; ++i){
int index = l.indexes[i];
state.delta[index] += l.delta[i];
}
}
|
rankktensor.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include <stdlib.h>
#include <string.h>
int ptiNewRankKruskalTensor(ptiRankKruskalTensor *ktsr, ptiIndex nmodes, const ptiIndex ndims[], ptiElementIndex rank)
{
ktsr->nmodes = nmodes;
ktsr->rank = rank;
ktsr->ndims = (ptiIndex*)malloc(nmodes*sizeof(ptiIndex));
for(ptiIndex i=0; i<nmodes; ++i)
ktsr->ndims[i] = ndims[i];
ktsr->lambda = (ptiValue*)malloc(rank*sizeof(ptiValue));
ktsr->fit = 0.0;
return 0;
}
/**
* Shuffle factor matrices row indices.
*
* @param[in] ktsr Kruskal tensor to be shuffled
* @param[out] map_inds is the renumbering mapping
*
*/
void ptiRankKruskalTensorInverseShuffleIndices(ptiRankKruskalTensor * ktsr, ptiIndex ** map_inds) {
/* Renumber factor matrices rows */
ptiIndex new_i;
for(ptiIndex m=0; m < ktsr->nmodes; ++m) {
ptiRankMatrix * mtx = ktsr->factors[m];
ptiIndex * mode_map_inds = map_inds[m];
ptiValue * tmp_values = malloc(mtx->cap * mtx->stride * sizeof (ptiValue));
for(ptiIndex i=0; i<mtx->nrows; ++i) {
new_i = mode_map_inds[i];
for(ptiElementIndex j=0; j<mtx->ncols; ++j) {
tmp_values[i * mtx->stride + j] = mtx->values[new_i * mtx->stride + j];
}
}
free(mtx->values);
mtx->values = tmp_values;
}
}
void ptiFreeRankKruskalTensor(ptiRankKruskalTensor *ktsr)
{
ktsr->rank = 0;
ktsr->fit = 0.0;
free(ktsr->ndims);
free(ktsr->lambda);
for(ptiIndex i=0; i<ktsr->nmodes; ++i)
ptiFreeRankMatrix(ktsr->factors[i]);
free(ktsr->factors);
ktsr->nmodes = 0;
}
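/* The CP fit is 1 - ||X - Xhat||_F / ||X||_F. The residual is computed
without ever forming Xhat, via the expansion
||X - Xhat||_F^2 = ||X||_F^2 + ||Xhat||_F^2 - 2 <X, Xhat>,
which only needs the sparse tensor norm, the Kruskal-tensor norm from
the factor Gram matrices, and the inner product computed below. */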
double KruskalTensorFitHiCOO(
ptiSparseTensorHiCOO const * const hitsr,
ptiValue const * const __restrict lambda,
ptiRankMatrix ** mats,
ptiRankMatrix ** ata)
{
ptiIndex const nmodes = hitsr->nmodes;
double ptien_normsq = SparseTensorFrobeniusNormSquaredHiCOO(hitsr);
// printf("ptien_normsq: %lf\n", ptien_normsq);
double const norm_mats = KruskalTensorFrobeniusNormSquaredRank(nmodes, lambda, ata);
// printf("norm_mats: %lf\n", norm_mats);
double const inner = SparseKruskalTensorInnerProductRank(nmodes, lambda, mats);
// printf("inner: %lf\n", inner);
double residual = ptien_normsq + norm_mats - 2 * inner;
if (residual > 0.0) {
residual = sqrt(residual);
}
// printf("residual: %lf\n", residual);
double fit = 1 - (residual / sqrt(ptien_normsq));
return fit;
}
// Column-major.
/* A Kruskal tensor's Frobenius norm is computed from the factor Gram matrices ("ata"s); cf. Tammy's sparse implementation. */
double KruskalTensorFrobeniusNormSquaredRank(
ptiIndex const nmodes,
ptiValue const * const __restrict lambda,
ptiRankMatrix ** ata) // ata: column-major
{
ptiElementIndex const rank = ata[0]->ncols;
ptiElementIndex const stride = ata[0]->stride;
ptiValue * const __restrict tmp_atavals = ata[nmodes]->values; // Column-major
double norm_mats = 0;
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(ptiIndex x=0; x < rank*stride; ++x) {
tmp_atavals[x] = 1.;
}
// printf("KruskalTensorFrobeniusNormSquaredRank: \n");
// ptiDumpRankMatrix(ata[nmodes], stdout);
/* Compute Hadamard product for all "ata"s */
for(ptiIndex m=0; m < nmodes; ++m) {
ptiValue const * const __restrict atavals = ata[m]->values;
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(ptiElementIndex i=0; i < rank; ++i) {
for(ptiElementIndex j=i; j < rank; ++j) {
tmp_atavals[j * stride + i] *= atavals[j * stride + i];
}
}
}
// printf("KruskalTensorFrobeniusNormSquaredRank: \n");
// ptiDumpRankMatrix(ata[nmodes], stdout);
/* compute lambda^T * aTa[MAX_NMODES] * lambda, only compute a half of them because of its symmetric */
// #ifdef HIPARTI_USE_OPENMP
// #pragma omp parallel for schedule(static) reduction(+:norm_mats)
// #endif
for(ptiElementIndex i=0; i < rank; ++i) {
norm_mats += tmp_atavals[i+(i*stride)] * lambda[i] * lambda[i];
for(ptiElementIndex j=i+1; j < rank; ++j) {
norm_mats += tmp_atavals[i+(j*stride)] * lambda[i] * lambda[j] * 2;
}
// printf("inter norm_mats: %lf\n", norm_mats);
}
return fabs(norm_mats);
}
// Row-major, compute via MTTKRP result (mats[nmodes]) and mats[nmodes-1].
double SparseKruskalTensorInnerProductRank(
ptiIndex const nmodes,
ptiValue const * const __restrict lambda,
ptiRankMatrix ** mats)
{
ptiElementIndex const rank = mats[0]->ncols;
ptiElementIndex const stride = mats[0]->stride;
ptiIndex const last_mode = nmodes - 1;
ptiIndex const I = mats[last_mode]->nrows;
// printf("mats[nmodes-1]:\n");
// ptiDumpMatrix(mats[nmodes-1], stdout);
// printf("mats[nmodes]:\n");
// ptiDumpMatrix(mats[nmodes], stdout);
ptiValue const * const last_vals = mats[last_mode]->values;
ptiValue const * const tmp_vals = mats[nmodes]->values;
ptiValue * buffer_accum;
double inner = 0;
double * const __restrict accum = (double *) malloc(rank*sizeof(*accum));
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel for schedule(static)
#endif
for(ptiElementIndex r=0; r < rank; ++r) {
accum[r] = 0.0;
}
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel
{
int const nthreads = omp_get_num_threads();
#pragma omp master
{
buffer_accum = (ptiValue *)malloc(nthreads * rank * sizeof(ptiValue));
for(ptiIndex j=0; j < (ptiIndex)nthreads * rank; ++j)
buffer_accum[j] = 0.0;
}
}
#endif
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel
{
int const tid = omp_get_thread_num();
int const nthreads = omp_get_num_threads();
ptiValue * loc_accum = buffer_accum + tid * rank;
#pragma omp for
for(ptiIndex i=0; i < I; ++i) {
for(ptiElementIndex r=0; r < rank; ++r) {
loc_accum[r] += last_vals[r+(i*stride)] * tmp_vals[r+(i*stride)];
}
}
#pragma omp for schedule(static)
for(ptiElementIndex j=0; j < rank; ++j) {
for(ptiIndex i=0; i < (ptiIndex)nthreads; ++i) {
accum[j] += buffer_accum[i*rank + j];
}
}
}
#else
for(ptiIndex i=0; i < I; ++i) {
for(ptiElementIndex r=0; r < rank; ++r) {
accum[r] += last_vals[r+(i*stride)] * tmp_vals[r+(i*stride)];
}
}
#endif
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel for schedule(static) reduction(+:inner)
#endif
for(ptiElementIndex r=0; r < rank; ++r) {
inner += accum[r] * lambda[r];
}
#ifdef HIPARTI_USE_OPENMP
free(buffer_accum);
#endif
free(accum);
return inner;
} |
sum.c | #include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "time.h"
#include "omp.h"
void generate_random(double *input, size_t size) {
for (size_t i = 0; i < size; i++) {
input[i] = rand() / (double)(RAND_MAX);
}
}
void compute_statistics(double *times, size_t n_runs, double *avg, double *std_dev) {
*avg = 0;
for (size_t i = 0; i < n_runs; ++i) {
*avg += times[i];
}
*avg /= n_runs;
*std_dev = 0;
for (size_t i = 0; i < n_runs; ++i) {
*std_dev += pow(times[i] - *avg, 2);
}
*std_dev /= n_runs;
*std_dev = sqrt(*std_dev);
}
double serial_sum(double *x, size_t size) {
double sum_val = 0.0;
for (size_t i = 0; i < size; i++) {
sum_val += x[i];
}
return sum_val;
}
double omp_sum(double *x, size_t size) {
double sum_val = 0.0;
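// NOTE: all threads update the shared sum_val without synchronization,
// so this version has a data race and its result is generally wrong;
// the harness reports the mismatch against the serial warmup sum.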
#pragma omp parallel for
for (size_t i = 0; i < size; i++) {
sum_val += x[i];
}
return sum_val;
}
double omp_critical_sum(double *x, size_t size) {
double sum_val = 0.0;
#pragma omp parallel for
for (size_t i = 0; i < size; i++) {
#pragma omp critical
{
sum_val += x[i];
}
}
return sum_val;
}
double omp_local_sum(double *x, size_t size, int n_threads) {
double sum_val;
double sum[n_threads];
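// adjacent sum[id] slots live on the same cache line, so concurrent
// updates from different threads cause false sharing (see tSum below)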
#pragma omp parallel shared(sum)
{
int id = omp_get_thread_num();
sum[id] = 0;
#pragma omp for
for (size_t i = 0; i < size; i++) {
sum[id] += x[i];
}
}
sum_val = 0;
for (size_t i = 0; i < n_threads; ++i) {
sum_val += sum[i];
}
return sum_val;
}
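// Padding each per-thread accumulator to its own cache line (128 bytes
// covers common 64/128-byte line sizes) removes the false sharing that
// slows down omp_local_sum.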
typedef struct {
double val;
char pad[128];
} tSum;
double opt_local_sum(double *x, size_t size, int n_threads) {
double sum_val;
tSum sum[n_threads];
#pragma omp parallel shared(sum)
{
int id = omp_get_thread_num();
sum[id].val = 0;
#pragma omp for
for (size_t i = 0; i < size; i++) {
sum[id].val += x[i];
}
}
sum_val = 0;
for (size_t i = 0; i < n_threads; ++i) {
sum_val += sum[i].val;
}
return sum_val;
}
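// A minimal sketch, not part of the original benchmark: the idiomatic
// OpenMP approach uses a reduction clause, which gives every thread a
// private copy of sum_val and combines the copies after the loop.
double omp_reduction_sum(double *x, size_t size) {
double sum_val = 0.0;
#pragma omp parallel for reduction(+ : sum_val)
for (size_t i = 0; i < size; i++) {
sum_val += x[i];
}
return sum_val;
}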
void measure_serial_sum(double *x, size_t size, size_t n_runs) {
double start_time;
double times[n_runs];
double warmup_sum, total_sum;
// Warmup run
warmup_sum = serial_sum(x, size);
for (int i = 0; i < n_runs; ++i) {
start_time = omp_get_wtime();
// Actual computation
total_sum = serial_sum(x, size);
times[i] = omp_get_wtime() - start_time;
if (fabs(total_sum - warmup_sum) > 0.001)
printf("Different results in serial_sum\n");
}
// Print average times and standard deviation
double avg, std_dev;
compute_statistics(times, n_runs, &avg, &std_dev);
printf("serial sum - Average: %fs - Std. deviation: %fs - Sum value: %f\n", avg, std_dev, total_sum);
}
void measure_omp_sum(double *x, size_t size, size_t n_runs) {
double start_time;
double times[n_runs];
double warmup_sum, total_sum;
// Warmup run
omp_set_num_threads(1);
warmup_sum = omp_sum(x, size);
// Vary the number of threads: 1, 2, 4, 8, 16, 20, 24, 28, 32
for (int n_threads = 1; n_threads <= 32; n_threads = (n_threads <= 8) ? 2 * n_threads : n_threads + 4) {
omp_set_num_threads(n_threads);
for (int i = 0; i < n_runs; ++i) {
start_time = omp_get_wtime();
// Actual computation
total_sum = omp_sum(x, size);
times[i] = omp_get_wtime() - start_time;
if (fabs(total_sum - warmup_sum) > 0.001)
printf("Different results in omp_sum with %2d threads: %f\n", n_threads, total_sum);
}
// Print average times and standard deviation
double avg, std_dev;
compute_statistics(times, n_runs, &avg, &std_dev);
printf("omp sum (%2d threads) - Average: %fs - Std. deviation: %fs - Sum value: %f\n", n_threads, avg, std_dev, total_sum);
}
}
void measure_omp_critical_sum(double *x, size_t size, size_t n_runs) {
double start_time;
double times[n_runs];
double warmup_sum, total_sum;
// Warmup run
omp_set_num_threads(1);
warmup_sum = omp_critical_sum(x, size);
// Vary the number of threads: 1, 2, 4, 8, 16, 20, 24, 28, 32
for (int n_threads = 1; n_threads <= 32; n_threads = (n_threads <= 8) ? 2 * n_threads : n_threads + 4) {
omp_set_num_threads(n_threads);
for (int i = 0; i < n_runs; ++i) {
start_time = omp_get_wtime();
// Actual computation
total_sum = omp_critical_sum(x, size);
times[i] = omp_get_wtime() - start_time;
if (fabs(total_sum - warmup_sum) > 0.001)
printf("Different results in omp_critical_sum with %2d threads: %f\n", n_threads, total_sum);
}
// Print average times and standard deviation
double avg, std_dev;
compute_statistics(times, n_runs, &avg, &std_dev);
printf("omp critical sum (%2d threads) - Average: %fs - Std. deviation: %fs - Sum value: %f\n", n_threads, avg, std_dev, total_sum);
}
}
void measure_omp_local_sum(double *x, size_t size, size_t n_runs) {
double start_time;
double times[n_runs];
double warmup_sum, total_sum;
// Warmup run
omp_set_num_threads(1);
warmup_sum = omp_local_sum(x, size, 1);
// Vary the number of threads: 1, 2, 4, 8, 16, 20, 24, 28, 32
for (int n_threads = 1; n_threads <= 32; n_threads = (n_threads <= 8) ? 2 * n_threads : n_threads + 4) {
omp_set_num_threads(n_threads);
for (int i = 0; i < n_runs; ++i) {
start_time = omp_get_wtime();
// Actual computation
total_sum = omp_local_sum(x, size, n_threads);
times[i] = omp_get_wtime() - start_time;
if (fabs(total_sum - warmup_sum) > 0.001)
printf("Different results in omp_local_sum with %2d threads: %f\n", n_threads, total_sum);
}
// Print average times and standard deviation
double avg, std_dev;
compute_statistics(times, n_runs, &avg, &std_dev);
printf("omp local sum (%2d threads) - Average: %fs - Std. deviation: %fs - Sum value: %f\n", n_threads, avg, std_dev, total_sum);
}
}
void measure_opt_local_sum(double *x, size_t size, size_t n_runs) {
double start_time;
double times[n_runs];
double warmup_sum, total_sum;
// Warmup run
omp_set_num_threads(1);
warmup_sum = opt_local_sum(x, size, 1);
// Vary the number of threads: 1, 2, 4, 8, 16, 20, 24, 28, 32
for (int n_threads = 1; n_threads <= 32; n_threads = (n_threads <= 8) ? 2 * n_threads : n_threads + 4) {
omp_set_num_threads(n_threads);
for (int i = 0; i < n_runs; ++i) {
start_time = omp_get_wtime();
// Actual computation
total_sum = opt_local_sum(x, size, n_threads);
times[i] = omp_get_wtime() - start_time;
if (fabs(total_sum - warmup_sum) > 0.001)
printf("Different results in opt_local_sum with %2d threads: %f\n", n_threads, total_sum);
}
// Print average times and standard deviation
double avg, std_dev;
compute_statistics(times, n_runs, &avg, &std_dev);
printf("opt local sum (%2d threads) - Average: %fs - Std. deviation: %fs - Sum value: %f\n", n_threads, avg, std_dev, total_sum);
}
}
int main() {
const size_t SIZE = 10000000;
const size_t NRUNS = 10;
double *x;
srand(time(NULL));
x = malloc(SIZE * sizeof(double));
generate_random(x, SIZE);
printf("==========\n");
measure_serial_sum(x, SIZE, NRUNS);
printf("==========\n");
measure_omp_sum(x, SIZE, NRUNS);
printf("==========\n");
measure_omp_critical_sum(x, SIZE, NRUNS);
printf("==========\n");
measure_omp_local_sum(x, SIZE, NRUNS);
printf("==========\n");
measure_opt_local_sum(x, SIZE, NRUNS);
printf("==========\n");
free(x);
return 0;
}
|
GB_unaryop__ainv_int8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_fp64
// op(A') function: GB_tran__ainv_int8_fp64
// C type: int8_t
// A type: double
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int8_t z ; GB_CAST_SIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int8_fp64
(
int8_t *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int8_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function: it stores a
/// function_ref, so clients must make sure all calls to get() with the
/// same location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
/// All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare. May alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
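// A few sketches of these contexts (illustrative only; 'f', 'g', 'h', and
// 'k' are hypothetical):
//   int n = sizeof(f());            // f() is an unevaluated operand
//   switch (k) { case g(): break; } // g() is constant-evaluated
//   void h(int x = f());            // f() is potentially evaluated if used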
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the source locations of the beginning of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
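// For example, a sketch of one selector with differing signatures
// (class names are hypothetical):
//   @interface A : NSObject
//   - (int)count;
//   @end
//   @interface B : NSObject
//   - (double)count;
//   @end
//   // a message send of 'count' to an 'id' must consult both pool entries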
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
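// A usage sketch:
//   {
//     FPContractStateRAII SavedFP(*this); // snapshot the current FPOptions
//     // ... act on the compound statement, possibly mutating FPFeatures ...
//   } // destructor restores the saved FP_CONTRACT state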
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
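// A typical usage sketch (the diagnostic ID here is illustrative):
//   Diag(D->getLocation(), diag::err_example_diag) << D->getDeclName() << T;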
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, so when a dereference of a noderef pointer is seen we do
/// not yet know whether it is a forbidden access. For example, in `&*p` where
/// `p` is a noderef pointer, we first parse the `*p`, but still need to check
/// that `address of` is applied to it. This requires keeping a container of
/// all pending expressions and checking whether the address of each is
/// eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
clang::Module *Module = nullptr;
bool ModuleInterface = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
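// A usage sketch (the diagnostic ID is illustrative); RequireCompleteType
// returns true, with a diagnostic already emitted, when T is incomplete:
//   if (RequireCompleteType(Loc, T, diag::err_example_incomplete, Range))
//     return ExprError();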
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it appeared in an
/// evaluated context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
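// A sketch of the situation this heuristic targets ('f' is hypothetical):
//   f<int>(0);  // if 'f' resolved to a non-template, '<' parsed as
//               // less-than; this asks whether 'f' was plausibly meant
//               // to be a template-name so we can diagnose it as such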
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *&Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
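///
/// For example, a sketch of the constexpr case mentioned above:
/// \code
///   constexpr int f() { return 4; }
///   int arr[f()]; // f's body is needed in the middle of an expression,
///                 // so its parsing could not have been delayed
/// \endcode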
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
Partition, ///< 'module partition X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path);
/// The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and reject it with an error in
/// case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
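// A worked example (illustrative, not normative): an availability attribute
// applied via '#pragma clang attribute' whose platform was inferred from
// another platform carries a final priority of AP_PragmaClangAttribute +
// AP_InferredFromOtherPlatform == 1 + 2 == 3. If the declaration already has
// an explicitly written attribute (priority AP_Explicit == 0) for that
// platform, the lower-priority explicit attribute wins and the inferred
// attribute is not applied.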
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(
NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
bool IsUnavailable, StringRef Message, bool IsStrict,
StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf ///< Condition in a constexpr if statement.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
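// Example (an illustrative sketch; the diagnostic ID is hypothetical and the
// remaining pure-virtual diagnostic callbacks are elided): a caller derives a
// small diagnoser and hands it to PerformContextualImplicitConversion:
//
//   struct SizeDiagnoser : Sema::ICEConvertDiagnoser {
//     SizeDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     Sema::SemaDiagnosticBuilder
//     diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) override {
//       return S.Diag(Loc, diag::err_size_not_integer /*hypothetical*/) << T;
//     }
//     // ... overrides for the other pure-virtual diagnostics ...
//   } Diagnoser;
//   ExprResult Converted =
//       SemaRef.PerformContextualImplicitConversion(Loc, SizeExpr, Diagnoser);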
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all template and non-template overload
// candidates identified by the expression E.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Lookup of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Lookup of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up the implicit 'self' parameter of an Objective-C method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
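// Example (an illustrative sketch; SemaRef, II, Loc, and CurScope are
// placeholder names): an ordinary unqualified lookup for an identifier:
//
//   LookupResult R(SemaRef, DeclarationName(II), Loc,
//                  Sema::LookupOrdinaryName);
//   if (SemaRef.LookupName(R, CurScope) && R.isSingleResult()) {
//     NamedDecl *ND = R.getFoundDecl();
//     // ... use ND ...
//   }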
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
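// Example (an illustrative sketch, assuming the DeclFilterCCC<VarDecl>
// callback from TypoCorrection.h to restrict candidates to variables):
// attempting to correct a misspelled ordinary name during error recovery and
// diagnosing the suggestion:
//
//   DeclFilterCCC<VarDecl> CCC;
//   if (TypoCorrection Corrected =
//           SemaRef.CorrectTypo(DeclarationNameInfo(II, Loc),
//                               Sema::LookupOrdinaryName, S, /*SS=*/nullptr,
//                               CCC, Sema::CTK_ErrorRecovery))
//     SemaRef.diagnoseTypo(Corrected,
//                          SemaRef.PDiag(diag::err_undeclared_var_use_suggest)
//                              << DeclarationName(II));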
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
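// Example (an illustrative sketch; CondExpr is a placeholder name): recover
// typos in a condition, accepting only rebuilt expressions of integral or
// enumeration type:
//
//   ExprResult Checked = SemaRef.CorrectDelayedTyposInExpr(
//       CondExpr, /*InitDecl=*/nullptr, [](Expr *E) -> ExprResult {
//         if (E->getType()->isIntegralOrEnumerationType())
//           return E;
//         return ExprError();
//       });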
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation's declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has a user-declared setter or getter,
/// but not both.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// the parameter CheckTheOther is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
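// Example (an illustrative sketch; Actions and the argument names are
// placeholders): the parser typically wraps a parsed increment expression as
// a full-expression before building a for statement (see ActOnForStmt below):
//
//   Sema::FullExprArg Third = Actions.MakeFullExpr(IncExpr, ForLoc);
//   StmtResult For = Actions.ActOnForStmt(ForLoc, LParenLoc, InitStmt,
//                                         CondResult, Third, RParenLoc, Body);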
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
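// Example (an illustrative sketch): the parser enters a compound scope for
// the duration of parsing a block, and the destructor balances the
// bookkeeping on every exit path:
//
//   Sema::CompoundScopeRAII CompoundScope(Actions);
//   // ... parse the statements of the block ...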
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself, e.g. 'x = std::move(x);'.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
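///
/// A minimal usage sketch (illustrative only; 'VD' and 'Loc' stand for a
/// VarDecl* and a SourceLocation already in hand):
/// \code
///   QualType CaptureType, DeclRefType;
///   bool Invalid = tryCaptureVariable(
///       VD, Loc, TryCapture_Implicit, /*EllipsisLoc=*/SourceLocation(),
///       /*BuildAndDiagnose=*/true, CaptureType, DeclRefType,
///       /*FunctionScopeIndexToStopAt=*/nullptr);
/// \endcode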
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation = false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
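// Note: 'LLLLoc' and 'GGGLoc' are the locations of the CUDA '<<<' and '>>>'
// execution-configuration delimiters, respectively.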
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison
// category types stored in ASTContext. The bit-index corresponds to the
// integer value of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling the destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions).
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
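// For example (illustrative): starting from throw(), integrating a call to
// a function declared throw(A) widens the result to throw(A); integrating a
// call to a function with no specification widens it to noexcept(false).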
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
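// Before C++11 there is no noexcept; start from throw() (EST_DynamicNone)
// rather than the C++11 default of noexcept (EST_BasicNoexcept).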
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
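///
/// A hypothetical usage sketch ('SemaRef' and 'RD' are assumed names, not
/// taken from this header):
/// \code
///   {
///     CXXThisScopeRAII ThisScope(SemaRef, RD, Qualifiers());
///     // 'this' expressions may now be formed against RD's type here.
///   } // The previous CXXThisTypeOverride is restored on destruction.
/// \endcode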
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns 'true' on failure, 'false' on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing a nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates an info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
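/// Creates an info object for the case where the object type is already
/// resolved to a QualType.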
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
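///
/// For illustration only (an assumed example, not from the original header):
/// \code
/// struct X { typedef int T; void f(T); };
/// void X::f(T t) {} // after 'X::f', 'T' is looked up in X's scope
/// \endcode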
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
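///
/// For illustration only (an assumed example, not from the original header):
/// \code
/// struct X { static const int a = 1; static int b; };
/// int X::b = a; // 'a' is looked up in the scope of X
/// \endcode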
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
bool IsConstexprSpecified);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture, applying
/// any implicit conversions (such as an lvalue-to-rvalue conversion) when
/// the initializer is not being used to initialize a reference.
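///
/// For illustration only (an assumed example; 'make_value' is a hypothetical
/// helper, not from the original header):
/// \code
/// auto f = [v = make_value()] { return v; };  // copy-initialized capture
/// auto g = [v{make_value()}] { return v; };   // direct-list-initialized
/// \endcode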
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id,
bool DirectInit, Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, IR
/// generation produces the real body of the function pointer conversion.
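///
/// For illustration only (an assumed example, not from the original header):
/// \code
/// int (*fp)(int) = [](int i) { return i + 1; }; // uses this conversion
/// \endcode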
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *", or a C structure with the attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check a class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point
/// later, when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
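///
/// For illustration only (an assumed example, not from the original header):
/// \code
/// struct B { virtual B *clone(); };
/// struct D : B { D *clone() override; }; // covariant return type: OK
/// \endcode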
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of the base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if the 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
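///
/// For illustration only (an assumed example, not from the original header):
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f() override; }; // error: overrides a 'final' function
/// \endcode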
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation());
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false,
bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
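///
/// For illustration only (an assumed example, not from the original header):
/// \code
/// template<typename ...Ts> void f(Ts t); // error: unexpanded pack 'Ts'
/// \endcode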
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
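///
/// For illustration only (an assumed example, not from the original header):
/// \code
/// template<typename ...Ts> void f(Ts ...args); // 'Ts...' is the expansion
/// \endcode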
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
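///
/// For illustration only (an assumed example, not from the original header):
/// \code
/// template<typename ...Ts, typename ...Us>
/// void g(std::pair<Ts, Us> ...ps); // 'Ts' and 'Us' must expand to the
///                                  // same number of arguments
/// \endcode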
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
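///
/// For illustration only (an assumed example, not from the original header):
/// \code
/// template<typename ...Ts> struct Count {
///   static const unsigned value = sizeof...(Ts); // no expansion needed
/// };
/// \endcode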
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute \p Replacement for \c auto in \p TypeWithAuto.
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute \p Replacement for \c auto in \p TypeWithAuto.
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
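///
/// For illustration only (an assumed example, not from the original header):
/// \code
/// // TypeWithAuto: 'const auto &'   Replacement: 'int'
/// // Result:       'const int &'
/// \endcode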
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *&Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArgs provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provide the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting a template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
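/// Example: a usage sketch for the RAII helper above; Sema &S and the pack
/// argument index I are hypothetical names.
/// \code
///   {
///     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
///     // Substitutions performed here use the I-th argument of each
///     // expanded parameter pack.
///   } // The previous substitution index is restored on scope exit.
/// \endcode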
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and the object evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
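/// Example: a typical usage sketch for InstantiatingTemplate; Sema &S, the
/// SourceLocation PointOfInstantiation, and the entity Decl *D are
/// hypothetical names.
/// \code
///   Sema::InstantiatingTemplate Inst(S, PointOfInstantiation, D);
///   if (Inst.isInvalid())
///     return true; // Depth limit exceeded; an error was already emitted.
///   if (Inst.isAlreadyInstantiating())
///     return false; // Already instantiating this specialization.
///   // ... perform the instantiation; the destructor pops the context ...
/// \endcode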
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
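/// Example: a usage sketch for SFINAETrap; Sema &S and the substitution in
/// the body are hypothetical.
/// \code
///   Sema::SFINAETrap Trap(S);
///   // ... perform a substitution or check that may emit diagnostics ...
///   if (Trap.hasErrorOccurred()) {
///     // Treat this as a deduction failure instead of a hard error.
///   }
/// \endcode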
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
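/// Example: a usage sketch for TentativeAnalysisScope; Sema &S is
/// hypothetical.
/// \code
///   {
///     Sema::TentativeAnalysisScope Tentative(S);
///     // Probe whether a construct would be valid; diagnostics and
///     // typo-correction in the immediate context are suppressed here.
///   }
/// \endcode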
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
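/// Example: a usage sketch for GlobalEagerInstantiationScope; Sema &S and
/// the body are hypothetical.
/// \code
///   {
///     Sema::GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
///     // ... work that may queue implicit instantiations and vtable uses ...
///     GlobalInstantiations.perform(); // define used vtables, flush the queue
///   } // The saved queues are swapped back on scope exit.
/// \endcode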
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
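/// Example: a usage sketch for LocalEagerInstantiationScope; Sema &S is
/// hypothetical.
/// \code
///   {
///     Sema::LocalEagerInstantiationScope LocalInstantiations(S);
///     // ... instantiate a function body; members of local classes are
///     // queued rather than instantiated immediately ...
///     LocalInstantiations.perform(); // instantiate the queued local entities
///   }
/// \endcode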
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
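/// Example: a usage sketch for ExtParameterInfoBuilder. The parameter index,
/// the parameter count, and the ExtProtoInfo EPI are hypothetical.
/// \code
///   Sema::ExtParameterInfoBuilder InfoBuilder;
///   InfoBuilder.set(1, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
///   EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(/*numParams=*/3);
/// \endcode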
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility ...
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
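/// Example: a usage sketch for the two setters above; Sema &S, QualType T,
/// Decl *FD, and the extension names are all hypothetical.
/// \code
///   S.setOpenCLExtensionForType(T, "cl_khr_fp64");
///   S.setOpenCLExtensionForDecl(FD, "cl_khr_subgroups");
/// \endcode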
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Checks if a type or a declaration is disabled because its owning extension
/// is disabled, and emits diagnostic messages if so.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
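// Illustrative sketch (not upstream code): for a user-defined reduction such
// as
// \code
//   #pragma omp declare reduction(myplus : int : omp_out += omp_in) initializer(omp_priv = 0)
// \endcode
// Sema is driven as DirectiveStart, then CombinerStart/CombinerEnd around the
// 'omp_out += omp_in' combiner, then InitializerStart/InitializerEnd around
// the 'omp_priv = 0' initializer, and finally DirectiveEnd.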
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
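// Illustrative sketch (not upstream code): a mapper declaration such as
// \code
//   #pragma omp declare mapper(id : struct S v) map(tofrom : v.a, v.b)
// \endcode
// flows through DirectiveStart, then DirectiveVarDecl for the 'struct S v'
// mapper variable, and finally DirectiveEnd with the parsed map clauses.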
/// Called on the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true if inside an OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return true if (un)supported features for the current target should be
/// diagnosed when OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() ||
isInOpenMPTargetExecutionDirective();
}
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
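// Illustrative sketch (not upstream code): each ActOnOpenMP*Directive below
// follows the same pattern. For example, for
// \code
//   #pragma omp simd safelen(8)
//   for (int i = 0; i < N; ++i)
//     a[i] = b[i] + c[i];
// \endcode
// the parser first builds the 'safelen' OMPClause, then hands the clause
// list and the captured loop statement to ActOnOpenMPSimdDirective, which
// checks the loop nest and builds the resulting directive statement node.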
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
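// Illustrative sketch (not upstream code): a clause written as
// 'schedule(monotonic: dynamic, 4)' would arrive here roughly as
// M1 = OMPC_SCHEDULE_MODIFIER_monotonic, M2 = OMPC_SCHEDULE_MODIFIER_unknown,
// Kind = OMPC_SCHEDULE_dynamic, and ChunkSize holding the expression '4'.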
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
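// Illustrative sketch (not upstream code): for 'linear(val(x): 2)' the
// parser passes VarList = { x }, LinKind = OMPC_LINEAR_val, and Step = '2';
// for a bare 'linear(x)' Step is null and a unit step is implied.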
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result of the cast has the value kind \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
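// Illustrative sketch (not upstream code): a typical call site performs an
// integral promotion in place, e.g.
// \code
//   ExprResult Res = ImpCastExprToType(E, Context.IntTy, CK_IntegralCast);
// \endcode
// merging into an existing ImplicitCastExpr instead of stacking a new one.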
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on the function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
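// Illustrative sketch (not upstream code): given 'int *p', the C assignment
// 'p = 3;' classifies as IntToPointer, while assigning '&p' to a 'char **'
// classifies as IncompatiblePointer; DiagnoseAssignmentResult below then
// decides whether each case is a warning or a hard error.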
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expression.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether a +1 expression is being assigned
/// to a weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether a +1 expression is being assigned
/// to a weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
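// Illustrative sketch (not upstream code): a caller handling 'if constexpr'
// can query the statically known value to discard the untaken branch.
// 'Cond' and 'CondIsTrue' are hypothetical caller-local names:
// \code
//   Sema::ConditionResult Cond = ...; // from ActOnCondition(ConstexprIf)
//   if (llvm::Optional<bool> Known = Cond.getKnownValue()) {
//     bool CondIsTrue = *Known; // branch to keep is known at compile time
//   }
// \endcode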
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted expression, or ExprError() if there were any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Performs the contextual conversion to bool;
/// returns ExprError() if the conversion is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
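// Illustrative sketch (not upstream code): callers tailor the "not an ICE"
// diagnostic by subclassing; 'CustomICEDiagnoser' and the diagnostic ID are
// hypothetical placeholders:
// \code
//   struct CustomICEDiagnoser : Sema::VerifyICEDiagnoser {
//     void diagnoseNotICE(Sema &S, SourceLocation Loc,
//                         SourceRange SR) override {
//       S.Diag(Loc, diag::err_expected_ice) << SR; // placeholder ID
//     }
//   };
// \endcode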
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns ExprError() on failure.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit-field width expression is an ICE
/// and has the correct width, and that the field type is valid.
/// Returns the width expression on success, or an invalid ExprResult on
/// failure. Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
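// Illustrative examples (not exhaustive): a __device__ caller invoking a
// __device__ callee is CFP_Native; a __device__ caller invoking a plain
// __host__ callee is CFP_Never; a __host__ __device__ caller invoking a
// __host__ callee while compiling for the device is CFP_WrongSide.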
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext, QualType BaseType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if a field shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters expected for it.
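///
/// For instance, with \p NumParams = 2 and \p NumArgs = 2, a call with
/// PartialOverloading set (i.e. just after a comma during code completion)
/// is treated as having one extra argument, so this returns true.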
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDefaultedMemberExceptionSpecs.empty() &&
"there shouldn't be any pending delayed defaulted member "
"exception specs");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDefaultedMemberExceptionSpecs)
SavedDefaultedMemberExceptionSpecs;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDefaultedMemberExceptionSpecs.swap(
S.DelayedDefaultedMemberExceptionSpecs);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
GB_binop__iseq_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int64)
// A*D function (colscale): GB (_AxD__iseq_int64)
// D*A function (rowscale): GB (_DxB__iseq_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int64)
// C=scalar+B GB (_bind1st__iseq_int64)
// C=scalar+B' GB (_bind1st_tran__iseq_int64)
// C=A+scalar GB (_bind2nd__iseq_int64)
// C=A'+scalar GB (_bind2nd_tran__iseq_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
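// Note: unlike GrB_EQ_INT64 (whose result type is bool), the ISEQ operator
// returns the comparison result in the operand type, so cij is an int64_t
// 0 or 1.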
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
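// For example, GB_BINOP (t, 3, 3, i, j) expands to t = (3 == 3) ; yielding
// t = 1; unequal inputs yield 0.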
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT64 || GxB_NO_ISEQ_INT64)
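// For example, a build configured with -DGxB_NO_ISEQ_INT64 (see
// GB_control.h) compiles this kernel out, and GraphBLAS falls back to the
// generic, typecasting worker instead.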
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__iseq_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__iseq_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__iseq_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__iseq_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
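// (Illustrative: this is the kernel reached by, e.g.,
// GrB_Matrix_apply_BinaryOp1st_INT64 with op = GxB_ISEQ_INT64, where each
// entry of C gets x == B(i,j).)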
GrB_Info GB (_bind1st__iseq_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__iseq_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__iseq_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__iseq_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB066-pointernoaliasing-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Freshly allocated pointers do not alias to each other.
*/
#include <stdio.h>
#include <stdlib.h>
void setup(int N)
{
double * m_pdv_sum = (double* ) malloc (sizeof (double) * N );
double * m_nvol = (double* ) malloc (sizeof (double) * N );
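// No data race here: m_pdv_sum and m_nvol are distinct heap allocations
// (so they cannot alias), and each iteration writes only its own index i.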
#pragma omp parallel for
for (int i=0; i < N; ++i )
{
m_pdv_sum[ i ] = 0.0;
m_nvol[ i ] = i*2.5;
}
for (int i=0; i < N; ++i )
{
printf("%lf\n", m_pdv_sum[ i ]);
printf("%lf\n", m_nvol[ i ]);
}
free(m_pdv_sum);
free(m_nvol);
}
int main()
{
int N = 1000;
setup(N);
return 0;
}
|
fused_rowwise_nbit_conversion_ops.h | #pragma once
#include <algorithm>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
// for param_search_greedy
#include "caffe2/operators/fused_rowwise_nbitfake_conversion_ops.h"
namespace caffe2 {
template <
int BIT_RATE,
typename T,
void (*convert)(float* dst, const T* src, size_t N),
bool GREEDY = false>
class FloatToFusedNBitRowwiseQuantizedOp final : public Operator<CPUContext> {
public:
FloatToFusedNBitRowwiseQuantizedOp(const OperatorDef& def, Workspace* ws)
: Operator<CPUContext>(def, ws) {}
~FloatToFusedNBitRowwiseQuantizedOp() override {}
bool RunOnDevice() override {
CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness");
const auto& input = Input(DATA_FLOAT);
CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1");
const auto input_rows = input.size_to_dim(input.dim() - 1);
const auto input_columns = input.size(input.dim() - 1);
static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8");
constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
CAFFE_ENFORCE_EQ(
input_columns % NUM_ELEM_PER_BYTE,
0,
"FloatToFused" + std::to_string(BIT_RATE) +
"BitRowwiseQuantizedOp only works for the number of "
"columns a multiple of " +
std::to_string(NUM_ELEM_PER_BYTE));
// The "fused" representation stores the scale and bias with the
// row-wise quantized data in one tensor.
// Since we represent the scale and bias in 16-bit float, we'll use the
// last 4 bytes of each row for scale (2 bytes) and bias (2 bytes).
// | ... quantized data ... | scale | bias |
// | number_of_columns | 2B | 2B |
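// Illustrative sizing: with BIT_RATE = 4 (NUM_ELEM_PER_BYTE = 2) and
// input_columns = 64, each output row is 64/2 = 32 bytes of packed data
// plus 2 + 2 bytes of fp16 scale and bias, i.e. 36 bytes in total.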
auto output_dimensions = input.sizes().vec();
output_dimensions[input.dim() - 1] = static_cast<std::int64_t>(
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
2 * sizeof(at::Half));
auto* output = Output(
DATA_FUSED_SCALE_BIAS, output_dimensions, at::dtype<std::uint8_t>());
const auto* input_data = input.template data<T>();
auto* output_data = output->template mutable_data<std::uint8_t>();
const auto output_columns = output->size(output->dim() - 1);
#ifdef _OPENMP
vector<float> tmp_vec(input_columns * (GREEDY ? omp_get_max_threads() : 1));
#else
vector<float> tmp_vec(input_columns);
#endif
#pragma omp parallel for if (GREEDY)
for (int row = 0; row < input_rows; ++row) {
float* tmp = tmp_vec.data();
#ifdef _OPENMP
if (GREEDY) {
tmp = &tmp_vec[omp_get_thread_num() * input_columns];
}
#endif
convert(tmp, input_data + row * input_columns, input_columns);
std::uint8_t* output_row = output_data + row * output_columns;
at::Half* output_row_scale = reinterpret_cast<at::Half*>(
output_row +
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE);
at::Half* output_row_bias = reinterpret_cast<at::Half*>(
output_row +
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
sizeof(at::Half));
float Xmin = *std::min_element(tmp, tmp + input_columns);
float Xmax = *std::max_element(tmp, tmp + input_columns);
if (GREEDY) {
C10_LOG_EVERY_N(INFO, 100) << "Running the GREEDY engine!";
internal::param_search_greedy(
tmp, input_columns, 200, 0.16, Xmin, Xmax, BIT_RATE);
}
// Round Xmin to fp16 to match with dequantization that will use fp16
// for Xmin.
Xmin = static_cast<at::Half>(Xmin);
const float range = Xmax - Xmin;
// Round scale to fp16 to match with dequantization that will use fp16
// for scale.
// Set scale to 1.0f for the corner case of Xmax == Xmin.
// Any non-zero scale would work because during quantization
// (X - Xmin) / scale will be 0 for all X unless scale is 0.
at::Half scale = range == 0 ? 1.0f : range / ((1 << BIT_RATE) - 1);
if (scale == 0) {
// Corner case handling when Xmax == Xmin
// Any scale would work because X - Xmin will be 0 for all X
scale = 1.0f;
}
*output_row_scale = scale;
*output_row_bias = Xmin;
for (int col = 0; col < input_columns; ++col) {
float X = tmp[col];
std::uint8_t quantized = std::max(
0,
std::min<int>(
std::lrintf((X - Xmin) / scale), (1 << BIT_RATE) - 1));
if (col % NUM_ELEM_PER_BYTE == 0) {
// LSB
output_row[col / NUM_ELEM_PER_BYTE] = quantized;
} else {
output_row[col / NUM_ELEM_PER_BYTE] |=
(quantized << ((col % NUM_ELEM_PER_BYTE) * BIT_RATE));
}
}
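// Illustrative packing example (added): with BIT_RATE = 2,
// NUM_ELEM_PER_BYTE = 4, so columns 0..3 land in byte 0 as
// (q3 << 6) | (q2 << 4) | (q1 << 2) | q0, with column 0 in the LSBs.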
}
return true;
}
private:
INPUT_TAGS(DATA_FLOAT);
OUTPUT_TAGS(DATA_FUSED_SCALE_BIAS);
};
template <
int BIT_RATE,
typename T,
void (*convert)(T* dst, const float* src, size_t N)>
class FusedNBitRowwiseQuantizedToFloatOp final : public Operator<CPUContext> {
public:
FusedNBitRowwiseQuantizedToFloatOp(const OperatorDef& def, Workspace* ws)
: Operator<CPUContext>(def, ws) {}
~FusedNBitRowwiseQuantizedToFloatOp() override {}
bool RunOnDevice() override {
CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness");
const auto& input = Input(DATA_FUSED_SCALE_BIAS);
CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1");
const auto input_rows = input.size_to_dim(input.dim() - 1);
const auto input_columns = input.size(input.dim() - 1);
static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8");
constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
// The last 4 bytes of each row hold the fp16 scale and bias (2 bytes
// each); every remaining byte packs NUM_ELEM_PER_BYTE quantized values,
// so the original row had (input_columns - 4) * NUM_ELEM_PER_BYTE values.
auto output_dimensions = input.sizes().vec();
output_dimensions[input.dim() - 1] =
static_cast<std::int64_t>(input_columns - 2 * sizeof(at::Half)) *
NUM_ELEM_PER_BYTE;
auto* output = Output(DATA_FLOAT, output_dimensions, at::dtype<T>());
const auto output_columns = output->size(output->dim() - 1);
const auto* input_data = input.template data<std::uint8_t>();
T* output_data = output->template mutable_data<T>();
std::vector<float> tmp(output_columns);
for (std::int64_t row = 0; row < input_rows; ++row) {
const std::uint8_t* input_row = input_data + row * input_columns;
float scale = *reinterpret_cast<const at::Half*>(
input_row +
(output_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE);
float bias = *reinterpret_cast<const at::Half*>(
input_row +
(output_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
sizeof(at::Half));
for (int col = 0; col < output_columns; ++col) {
std::uint8_t quantized = input_row[col / NUM_ELEM_PER_BYTE];
quantized >>= (col % NUM_ELEM_PER_BYTE) * BIT_RATE;
quantized &= (1 << BIT_RATE) - 1;
tmp[col] = scale * quantized + bias;
}
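// Note (added): this is the inverse of the quantization above up to
// rounding: q = round((X - bias) / scale), so scale * q + bias ~= X,
// with scale and bias both limited to fp16 precision.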
convert(output_data + row * output_columns, tmp.data(), output_columns);
}
return true;
}
private:
INPUT_TAGS(DATA_FUSED_SCALE_BIAS);
OUTPUT_TAGS(DATA_FLOAT);
};
} // namespace caffe2
|
Stmt.h | //===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <string>
namespace llvm {
class FoldingSetNodeID;
}
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class IdentifierInfo;
class LabelDecl;
class ParmVarDecl;
class PrinterHelper;
struct PrintingPolicy;
class QualType;
class RecordDecl;
class SourceManager;
class StringLiteral;
class SwitchStmt;
class Token;
class VarDecl;
//===--------------------------------------------------------------------===//
// ExprIterator - Iterators for iterating over Stmt* arrays that contain
// only Expr*. This is needed because AST nodes use Stmt* arrays to store
// references to children (to be compatible with StmtIterator).
//===--------------------------------------------------------------------===//
class Stmt;
class Expr;
class ExprIterator {
Stmt** I;
public:
ExprIterator(Stmt** i) : I(i) {}
ExprIterator() : I(0) {}
ExprIterator& operator++() { ++I; return *this; }
ExprIterator operator-(size_t i) { return I-i; }
ExprIterator operator+(size_t i) { return I+i; }
Expr* operator[](size_t idx);
// FIXME: Verify that this will correctly return a signed distance.
signed operator-(const ExprIterator& R) const { return I - R.I; }
Expr* operator*() const;
Expr* operator->() const;
bool operator==(const ExprIterator& R) const { return I == R.I; }
bool operator!=(const ExprIterator& R) const { return I != R.I; }
bool operator>(const ExprIterator& R) const { return I > R.I; }
bool operator>=(const ExprIterator& R) const { return I >= R.I; }
};
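// Hypothetical usage sketch (added; 'process' is an assumed helper, not
// part of this header): walking a Stmt** argument array as Expr*:
//   void processArgs(Stmt **Args, unsigned NumArgs) {
//     for (ExprIterator I(Args), E(Args + NumArgs); I != E; ++I)
//       process(*I); // operator* yields Expr*, not Stmt*
//   }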
class ConstExprIterator {
const Stmt * const *I;
public:
ConstExprIterator(const Stmt * const *i) : I(i) {}
ConstExprIterator() : I(0) {}
ConstExprIterator& operator++() { ++I; return *this; }
ConstExprIterator operator+(size_t i) const { return I+i; }
ConstExprIterator operator-(size_t i) const { return I-i; }
const Expr * operator[](size_t idx) const;
signed operator-(const ConstExprIterator& R) const { return I - R.I; }
const Expr * operator*() const;
const Expr * operator->() const;
bool operator==(const ConstExprIterator& R) const { return I == R.I; }
bool operator!=(const ConstExprIterator& R) const { return I != R.I; }
bool operator>(const ConstExprIterator& R) const { return I > R.I; }
bool operator>=(const ConstExprIterator& R) const { return I >= R.I; }
};
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
void* operator new(size_t bytes) throw() {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void* data) throw() {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
class StmtBitfields {
friend class Stmt;
/// \brief The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class CompoundStmtBitfields {
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
};
class ExprBitfields {
friend class Expr;
friend class DeclRefExpr; // computeDependence
friend class InitListExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class ASTStmtReader; // deserialization
friend class CXXNewExpr; // ctor
friend class DependentScopeDeclRefExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CallExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class ObjCMessageExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ShuffleVectorExpr; // ctor
friend class ParenListExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class OverloadExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class AtomicExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 2;
unsigned TypeDependent : 1;
unsigned ValueDependent : 1;
unsigned InstantiationDependent : 1;
unsigned ContainsUnexpandedParameterPack : 1;
};
enum { NumExprBits = 16 };
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 2;
};
enum APFloatSemantics {
IEEEhalf,
IEEEsingle,
IEEEdouble,
x87DoubleExtended,
IEEEquad,
PPCDoubleDouble
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 2;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class DeclRefExprBitfields {
friend class DeclRefExpr;
friend class ASTStmtReader; // deserialization
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingLocal : 1;
};
class CastExprBitfields {
friend class CastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned BasePathSize : 32 - 6 - NumExprBits;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
};
class ExprWithCleanupsBitfields {
friend class ExprWithCleanups;
friend class ASTStmtReader; // deserialization
unsigned : NumExprBits;
unsigned NumObjects : 32 - NumExprBits;
};
class PseudoObjectExprBitfields {
friend class PseudoObjectExpr;
friend class ASTStmtReader; // deserialization
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
/// Whether this initializer list initializes a std::initializer_list
/// object.
unsigned InitializesStdInitializerList : 1;
};
class TypeTraitExprBitfields {
friend class TypeTraitExpr;
friend class ASTStmtReader;
friend class ASTStmtWriter;
unsigned : NumExprBits;
/// \brief The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// \brief If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// \brief The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
union {
// FIXME: this is wasteful on 64-bit platforms.
void *Aligner;
StmtBitfields StmtBits;
CompoundStmtBitfields CompoundStmtBits;
ExprBitfields ExprBits;
CharacterLiteralBitfields CharacterLiteralBits;
FloatingLiteralBitfields FloatingLiteralBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
DeclRefExprBitfields DeclRefExprBits;
CastExprBitfields CastExprBits;
CallExprBitfields CallExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
InitListExprBitfields InitListExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
};
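// Note (added commentary): each *Bitfields class above begins with an
// anonymous pad ("unsigned : NumStmtBits;" or ": NumExprBits;"), so the
// low bits written through StmtBits.sClass occupy the same storage in
// every union member and stay readable whichever view is used.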
friend class ASTStmtReader;
friend class ASTStmtWriter;
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, ASTContext& C,
unsigned alignment = 8) throw();
void* operator new(size_t bytes, ASTContext* C,
unsigned alignment = 8) throw();
void* operator new(size_t bytes, void* mem) throw() {
return mem;
}
void operator delete(void*, ASTContext&, unsigned) throw() { }
void operator delete(void*, ASTContext*, unsigned) throw() { }
void operator delete(void*, std::size_t) throw() { }
void operator delete(void*, void*) throw() { }
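// Usage note (added): concrete statements are allocated through the
// placement forms above, e.g. a sketch assuming an ASTContext 'Ctx':
//   Stmt *S = new (Ctx) NullStmt(SemiLoc);
// which draws memory from the ASTContext allocator instead of the heap.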
public:
/// \brief A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell { };
private:
/// \brief Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// \brief Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) {
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
public:
Stmt(StmtClass SC) {
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getLocStart() const LLVM_READONLY;
SourceLocation getLocEnd() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// \brief Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
LLVM_ATTRIBUTE_USED void dump() const;
LLVM_ATTRIBUTE_USED void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
/// dumpColor - same as dump(), but forces color highlighting.
LLVM_ATTRIBUTE_USED void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy,
unsigned Indentation = 0) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip past any implicit AST nodes which might surround this
/// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
Stmt *IgnoreImplicit();
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// hasImplicitControlFlow - Some statements (e.g. short circuited operations)
/// contain implicit control-flow in the order their subexpressions
/// are evaluated. This predicate returns true if this statement has
/// such implicit control-flow. Such statements are also specially handled
/// within CFGs.
bool hasImplicitControlFlow() const;
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
typedef StmtIterator child_iterator;
typedef ConstStmtIterator const_child_iterator;
typedef StmtRange child_range;
typedef ConstStmtRange const_child_range;
child_range children();
const_child_range children() const {
return const_cast<Stmt*>(this)->children();
}
child_iterator child_begin() { return children().first; }
child_iterator child_end() { return children().second; }
const_child_iterator child_begin() const { return children().first; }
const_child_iterator child_end() const { return children().second; }
/// \brief Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
///
class DeclStmt : public Stmt {
DeclGroupRef DG;
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc,
SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg),
StartLoc(startLoc), EndLoc(endLoc) {}
/// \brief Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { }
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const {
return DG.isSingleDecl();
}
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
SourceLocation getStartLoc() const { return StartLoc; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
typedef DeclGroupRef::iterator decl_iterator;
typedef DeclGroupRef::const_iterator const_decl_iterator;
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
SourceLocation SemiLoc;
/// \brief True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
bool HasLeadingEmptyMacro;
public:
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass), SemiLoc(L),
HasLeadingEmptyMacro(hasLeadingEmptyMacro) {}
/// \brief Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty),
HasLeadingEmptyMacro(false) { }
SourceLocation getSemiLoc() const { return SemiLoc; }
void setSemiLoc(SourceLocation L) { SemiLoc = L; }
bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }
SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
child_range children() { return child_range(); }
friend class ASTStmtReader;
friend class ASTStmtWriter;
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
///
class CompoundStmt : public Stmt {
Stmt** Body;
SourceLocation LBracLoc, RBracLoc;
public:
CompoundStmt(ASTContext &C, ArrayRef<Stmt*> Stmts,
SourceLocation LB, SourceLocation RB);
/// \brief Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), Body(0), LBracLoc(Loc), RBracLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
}
/// \brief Build an empty compound statement.
explicit CompoundStmt(EmptyShell Empty)
: Stmt(CompoundStmtClass, Empty), Body(0) {
CompoundStmtBits.NumStmts = 0;
}
void setStmts(ASTContext &C, Stmt **Stmts, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
typedef Stmt** body_iterator;
body_iterator body_begin() { return Body; }
body_iterator body_end() { return Body + size(); }
Stmt *body_back() { return !body_empty() ? Body[size()-1] : 0; }
void setLastStmt(Stmt *S) {
assert(!body_empty() && "setLastStmt");
Body[size()-1] = S;
}
typedef Stmt* const * const_body_iterator;
const_body_iterator body_begin() const { return Body; }
const_body_iterator body_end() const { return Body + size(); }
const Stmt *body_back() const { return !body_empty() ? Body[size()-1] : 0; }
typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
typedef std::reverse_iterator<const_body_iterator>
const_reverse_body_iterator;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
SourceLocation getLocStart() const LLVM_READONLY { return LBracLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return RBracLoc; }
SourceLocation getLBracLoc() const { return LBracLoc; }
void setLBracLoc(SourceLocation L) { LBracLoc = L; }
SourceLocation getRBracLoc() const { return RBracLoc; }
void setRBracLoc(SourceLocation L) { RBracLoc = L; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() {
return child_range(&Body[0], &Body[0]+CompoundStmtBits.NumStmts);
}
const_child_range children() const {
return child_range(&Body[0], &Body[0]+CompoundStmtBits.NumStmts);
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
// A pointer to the following CaseStmt or DefaultStmt class,
// used by SwitchStmt.
SwitchCase *NextSwitchCase;
SourceLocation KeywordLoc;
SourceLocation ColonLoc;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), NextSwitchCase(0), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {}
SwitchCase(StmtClass SC, EmptyShell)
: Stmt(SC), NextSwitchCase(0) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return KeywordLoc; }
void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase*>(this)->getSubStmt();
}
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
class CaseStmt : public SwitchCase {
enum { LHS, RHS, SUBSTMT, END_EXPR };
Stmt* SubExprs[END_EXPR]; // The expression for the RHS is non-null only
// for the GNU "case 1 ... 4" range extension.
SourceLocation EllipsisLoc;
public:
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
SubExprs[SUBSTMT] = 0;
SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
EllipsisLoc = ellipsisLoc;
}
/// \brief Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { }
SourceLocation getCaseLoc() const { return KeywordLoc; }
void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }
const Expr *getLHS() const {
return reinterpret_cast<const Expr*>(SubExprs[LHS]);
}
const Expr *getRHS() const {
return reinterpret_cast<const Expr*>(SubExprs[RHS]);
}
const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }
void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
}
};
class DefaultStmt : public SwitchCase {
Stmt* SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) :
SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// \brief Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) { }
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return KeywordLoc; }
void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};
inline SourceLocation SwitchCase::getLocEnd() const {
if (const CaseStmt *CS = dyn_cast<CaseStmt>(this))
return CS->getLocEnd();
return cast<DefaultStmt>(this)->getLocEnd();
}
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
///
class LabelStmt : public Stmt {
LabelDecl *TheDecl;
Stmt *SubStmt;
SourceLocation IdentLoc;
public:
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: Stmt(LabelStmtClass), TheDecl(D), SubStmt(substmt), IdentLoc(IL) {
}
/// \brief Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { }
SourceLocation getIdentLoc() const { return IdentLoc; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setIdentLoc(SourceLocation L) { IdentLoc = L; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}
child_range children() { return child_range(&SubStmt, &SubStmt+1); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// \brief Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
///
class AttributedStmt : public Stmt {
Stmt *SubStmt;
SourceLocation AttrLoc;
unsigned NumAttrs;
const Attr *Attrs[1];
friend class ASTStmtReader;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt)
: Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
NumAttrs(Attrs.size()) {
memcpy(this->Attrs, Attrs.data(), Attrs.size() * sizeof(Attr*));
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
memset(Attrs, 0, NumAttrs * sizeof(Attr*));
}
public:
static AttributedStmt *Create(ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr*> Attrs, Stmt *SubStmt);
/// \brief Build an empty attributed statement.
static AttributedStmt *CreateEmpty(ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttrLoc; }
ArrayRef<const Attr*> getAttrs() const {
return ArrayRef<const Attr*>(Attrs, NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
///
class IfStmt : public Stmt {
enum { VAR, COND, THEN, ELSE, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation IfLoc;
SourceLocation ElseLoc;
public:
IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = 0);
/// \brief Build an empty if/then/else statement
explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }
/// \brief Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(ASTContext &C, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
const Stmt *getThen() const { return SubExprs[THEN]; }
void setThen(Stmt *S) { SubExprs[THEN] = S; }
const Stmt *getElse() const { return SubExprs[ELSE]; }
void setElse(Stmt *S) { SubExprs[ELSE] = S; }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Stmt *getThen() { return SubExprs[THEN]; }
Stmt *getElse() { return SubExprs[ELSE]; }
SourceLocation getIfLoc() const { return IfLoc; }
void setIfLoc(SourceLocation L) { IfLoc = L; }
SourceLocation getElseLoc() const { return ElseLoc; }
void setElseLoc(SourceLocation L) { ElseLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
if (SubExprs[ELSE])
return SubExprs[ELSE]->getLocEnd();
else
return SubExprs[THEN]->getLocEnd();
}
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
///
class SwitchStmt : public Stmt {
enum { VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
// This points to a linked list of case and default statements.
SwitchCase *FirstCase;
SourceLocation SwitchLoc;
/// If the SwitchStmt is a switch on an enum value, this records whether
/// all the enum values were covered by CaseStmts. This value is meant to
/// be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
public:
SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond);
/// \brief Build an empty switch statement.
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { }
/// \brief Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(ASTContext &C, VarDecl *V);
/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Stmt *getBody() const { return SubExprs[BODY]; }
const SwitchCase *getSwitchCaseList() const { return FirstCase; }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SwitchCase *getSwitchCaseList() { return FirstCase; }
/// \brief Set the case list for this switch statement.
///
/// The caller is responsible for incrementing the retain counts on
/// all of the SwitchCase statements in this list.
void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }
SourceLocation getSwitchLoc() const { return SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }
void setBody(Stmt *S, SourceLocation SL) {
SubExprs[BODY] = S;
SwitchLoc = SL;
}
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase()
&& "case/default already added to a switch");
SC->setNextSwitchCase(FirstCase);
FirstCase = SC;
}
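// Note (added): since this prepends, getSwitchCaseList() visits the
// cases in reverse of the order in which they were added.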
/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() {
AllEnumCasesCovered = 1;
}
/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const {
return (bool) AllEnumCasesCovered;
}
SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY]->getLocEnd();
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
};
/// WhileStmt - This represents a 'while' stmt.
///
class WhileStmt : public Stmt {
enum { VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation WhileLoc;
public:
WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
SourceLocation WL);
/// \brief Build an empty while statement.
explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { }
/// \brief Retrieve the variable declared in this "while" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// while (int x = random()) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(ASTContext &C, VarDecl *V);
/// If this WhileStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY]->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// DoStmt - This represents a 'do/while' stmt.
///
class DoStmt : public Stmt {
enum { BODY, COND, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation DoLoc;
SourceLocation WhileLoc;
SourceLocation RParenLoc; // Location of final ')' in do stmt condition.
public:
DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
SourceLocation RP)
: Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
SubExprs[BODY] = body;
}
/// \brief Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { }
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getDoLoc() const { return DoLoc; }
void setDoLoc(SourceLocation L) { DoLoc = L; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
///
class ForStmt : public Stmt {
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation ForLoc;
SourceLocation LParenLoc, RParenLoc;
public:
ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc,
Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP);
/// \brief Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }
Stmt *getInit() { return SubExprs[INIT]; }
/// \brief Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(ASTContext &C, VarDecl *V);
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getForLoc() const { return ForLoc; }
void setForLoc(SourceLocation L) { ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return SubExprs[BODY]->getLocEnd();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
///
class GotoStmt : public Stmt {
LabelDecl *Label;
SourceLocation GotoLoc;
SourceLocation LabelLoc;
public:
GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
: Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}
/// \brief Build an empty goto statement.
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { }
LabelDecl *getLabel() const { return Label; }
void setLabel(LabelDecl *D) { Label = D; }
SourceLocation getGotoLoc() const { return GotoLoc; }
void setGotoLoc(SourceLocation L) { GotoLoc = L; }
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GotoStmtClass;
}
// Iterators
child_range children() { return child_range(); }
};
/// IndirectGotoStmt - This represents an indirect goto.
///
class IndirectGotoStmt : public Stmt {
SourceLocation GotoLoc;
SourceLocation StarLoc;
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
Expr *target)
: Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
Target((Stmt*)target) {}
/// \brief Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) { }
void setGotoLoc(SourceLocation L) { GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
}
SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators
child_range children() { return child_range(&Target, &Target+1); }
};
/// ContinueStmt - This represents a continue.
///
class ContinueStmt : public Stmt {
SourceLocation ContinueLoc;
public:
ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}
/// \brief Build an empty continue statement.
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { }
SourceLocation getContinueLoc() const { return ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}
// Iterators
child_range children() { return child_range(); }
};
/// BreakStmt - This represents a break.
///
class BreakStmt : public Stmt {
SourceLocation BreakLoc;
public:
BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {}
/// \brief Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { }
SourceLocation getBreakLoc() const { return BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakLoc = L; }
SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}
// Iterators
child_range children() { return child_range(); }
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
///
class ReturnStmt : public Stmt {
Stmt *RetExpr;
SourceLocation RetLoc;
const VarDecl *NRVOCandidate;
public:
ReturnStmt(SourceLocation RL)
: Stmt(ReturnStmtClass), RetExpr(0), RetLoc(RL), NRVOCandidate(0) { }
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
: Stmt(ReturnStmtClass), RetExpr((Stmt*) E), RetLoc(RL),
NRVOCandidate(NRVOCandidate) {}
/// \brief Build an empty return expression.
explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { }
const Expr *getRetValue() const;
Expr *getRetValue();
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }
SourceLocation getReturnLoc() const { return RetLoc; }
void setReturnLoc(SourceLocation L) { RetLoc = L; }
/// \brief Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }
SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; }
SourceLocation getLocEnd() const LLVM_READONLY {
return RetExpr ? RetExpr->getLocEnd() : RetLoc;
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators
child_range children() {
if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
return child_range();
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
///
class AsmStmt : public Stmt {
protected:
SourceLocation AsmLoc;
/// \brief True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// \brief If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
Stmt **Exprs;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers) :
Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { }
friend class ASTStmtReader;
public:
/// \brief Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) :
Stmt(SC, Empty), Exprs(0) { }
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); }
SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators.
typedef ExprIterator inputs_iterator;
typedef ConstExprIterator const_inputs_iterator;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
// Output expr iterators.
typedef ExprIterator outputs_iterator;
typedef ConstExprIterator const_outputs_iterator;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
///
class GCCAsmStmt : public AsmStmt {
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints;
StringLiteral **Clobbers;
IdentifierInfo **Names;
friend class ASTStmtReader;
public:
GCCAsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, SourceLocation rparenloc);
/// \brief Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty),
Constraints(0), Clobbers(0), Names(0) { }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, char Modifier)
: MyKind(Operand), Str(), OperandNo(OpNo) {
Str += Modifier;
}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const {
assert(isString());
return Str;
}
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const {
assert(isOperand());
return Str[0];
}
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] into Operand AsmStringPieces.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
ASTContext &C, unsigned &DiagOffs) const;
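// Illustrative decomposition (added example, assumed behavior): the
// GCC-style string "addl %1, %0" would yield four pieces:
//   String("addl "), Operand(#1), String(", "), Operand(#0).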
/// Assemble final IR asm string.
std::string generateAsmString(ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const {
return Names[i];
}
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return StringRef();
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return StringRef();
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
private:
void setOutputsAndInputsAndClobbers(ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
///
class MSAsmStmt : public AsmStmt {
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks;
Token *AsmToks;
StringRef *Constraints;
StringRef *Clobbers;
friend class ASTStmtReader;
public:
MSAsmStmt(ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc,
bool issimple, bool isvolatile, ArrayRef<Token> asmtoks,
unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// \brief Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty),
NumAsmToks(0), AsmToks(0), Constraints(0), Clobbers(0) { }
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return ArrayRef<StringRef>(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return ArrayRef<StringRef>(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return ArrayRef<Expr*>(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(ASTContext &C,
StringRef AsmString,
ArrayRef<Token> AsmToks,
ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs,
ArrayRef<StringRef> Clobbers);
public:
SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[0]);
}
};
class SEHExceptStmt : public Stmt {
SourceLocation Loc;
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
friend class ASTReader;
friend class ASTStmtReader;
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { }
public:
static SEHExceptStmt* Create(ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children,Children+2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
class SEHFinallyStmt : public Stmt {
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc,
Stmt *Block);
friend class ASTReader;
friend class ASTStmtReader;
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { }
public:
static SEHFinallyStmt* Create(ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getLocEnd(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
class SEHTryStmt : public Stmt {
bool IsCXXTry;
SourceLocation TryLoc;
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
friend class ASTReader;
friend class ASTStmtReader;
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { }
public:
static SEHTryStmt* Create(ASTContext &C,
bool isCXXTry,
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children,Children+2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// \brief This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// \brief The different capture forms: by 'this' or by reference, etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef
};
/// \brief Describes the capture of either a variable or 'this'.
class Capture {
llvm::PointerIntPair<VarDecl *, 1, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
/// \brief Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
///
Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = 0)
: VarAndKind(Var, Kind), Loc(Loc) {
switch (Kind) {
case VCK_This:
assert(Var == 0 && "'this' capture cannot have a variable!");
break;
case VCK_ByRef:
assert(Var && "capturing by reference must have a variable!");
break;
}
}
/// \brief Determine the kind of capture.
VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); }
/// \brief Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// \brief Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// \brief Determine whether this capture handles a variable.
bool capturesVariable() const { return getCaptureKind() != VCK_This; }
/// \brief Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture does not capture 'this'.
VarDecl *getCapturedVar() const {
assert(!capturesThis() && "No variable available for 'this' capture");
return VarAndKind.getPointer();
}
friend class ASTStmtReader;
};
private:
/// \brief The number of variables captured, including 'this'.
unsigned NumCaptures;
/// \brief The pointer part is the implicitly-generated outlined function and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind;
/// \brief The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl;
/// \brief Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// \brief Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() const {
return reinterpret_cast<Stmt **>(const_cast<CapturedStmt *>(this) + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
static CapturedStmt *Create(ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(ASTContext &Context,
unsigned NumCaptures);
/// \brief Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const {
return const_cast<CapturedStmt *>(this)->getCapturedStmt();
}
/// \brief Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); }
const CapturedDecl *getCapturedDecl() const {
return const_cast<CapturedStmt *>(this)->getCapturedDecl();
}
/// \brief Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D) {
assert(D && "null CapturedDecl");
CapDeclAndKind.setPointer(D);
}
/// \brief Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const {
return CapDeclAndKind.getInt();
}
/// \brief Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind) {
CapDeclAndKind.setInt(Kind);
}
/// \brief Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// \brief Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// \brief True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// \brief An iterator that walks over the captures.
typedef Capture *capture_iterator;
typedef const Capture *const_capture_iterator;
/// \brief Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// \brief Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// \brief Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// \brief Iterator that walks over the capture initialization arguments.
typedef Expr **capture_init_iterator;
/// \brief Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr **>(getStoredStmts());
}
/// \brief Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
SourceLocation getLocStart() const LLVM_READONLY {
return getCapturedStmt()->getLocStart();
}
SourceLocation getLocEnd() const LLVM_READONLY {
return getCapturedStmt()->getLocEnd();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
friend class ASTStmtReader;
};
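// Usage sketch (illustrative only, not part of the class): the capture list
// and the capture-initializer list run in parallel, each with capture_size()
// elements, so they can be walked together; 'CS' is a hypothetical
// CapturedStmt pointer.
//
//   CapturedStmt::capture_init_iterator I = CS->capture_init_begin();
//   for (CapturedStmt::capture_iterator C = CS->capture_begin(),
//                                       E = CS->capture_end();
//        C != E; ++C, ++I) {
//     if (C->capturesVariable())
//       (void) C->getCapturedVar(); // the VarDecl being captured
//   }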
} // end namespace clang
#endif
|
DataArray.h | // Copyright (c) 2013 Vasili Baranau
// Distributed under the MIT software license
// See the accompanying file License.txt or http://opensource.org/licenses/MIT
#ifndef ImageProcessing_Model_Headers_DataArray_h
#define ImageProcessing_Model_Headers_DataArray_h
#include "Image.h"
#include "Core/Headers/StlUtilities.h"
#include "Core/Headers/Path.h"
#include "Core/Headers/MemoryUtility.h"
#include "ImageProcessing/Model/Headers/Config.h"
#include "ImageProcessing/Model/Headers/Constants.h"
#include "ImageProcessing/Services/Headers/Serializer.h"
#include "ImageProcessing/Model/Headers/IDataArray.h"
namespace Model
{
template<class TData>
class DataArray : public virtual IDataArray
{
public:
TData defaultValue;
private:
// Const variables
const Config* config;
const std::vector<ActiveArea>* activeAreas;
std::string workingPath;
std::vector<std::string> initialImageFilePaths;
std::vector<std::string> imageFilePaths;
TData*** dataArray;
// Dynamic variables
ActiveArea currentActiveArea;
bool imageChanged;
bool activeAreaLoaded;
public:
DataArray()
{
defaultValue = 0;
dataArray = NULL;
}
OVERRIDE void Initialize(const Config& currentConfig, std::string currentWorkingPath, const std::vector<ActiveArea>& currentActiveAreas)
{
config = &currentConfig;
activeAreas = &currentActiveAreas;
workingPath = currentWorkingPath;
Clear();
FillImagePaths();
imageChanged = false;
activeAreaLoaded = false;
AllocateMemory();
}
std::string GetWorkingPath() const
{
return workingPath;
}
virtual ~DataArray()
{
Clear();
}
OVERRIDE int GetBytesPerPixel() const
{
return sizeof(TData);
}
inline TData GetPixel(const Core::DiscreteSpatialVector& position) const
{
return GetPixel(position[Core::Axis::X], position[Core::Axis::Y], position[Core::Axis::Z]);
}
inline TData GetPixel(int x, int y, int z) const
{
int localXIndex = x - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::X];
int localYIndex = y - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Y];
int localZIndex = z - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
// The first dimension is z. It's no mistake. 2D images have row-major order (as used in C++)
// but adding them together makes z axis the major one.
return dataArray[localZIndex][localXIndex][localYIndex];
}
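// Layout sketch (illustrative): for a box with margins of size (SX, SY, SZ)
// whose left corner sits at (LX, LY, LZ), the pixel at global (x, y, z) is
// dataArray[z - LZ][x - LX][y - LY]. Since the last dimension is allocated
// contiguously (see AllocateMemory), one 2D slice occupies SX * SY
// consecutive TData values.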
// inline TData& GetPixelReference(const Core::DiscreteSpatialVector& position) const
// {
// return GetPixelReference(position[Core::Axis::X], position[Core::Axis::Y], position[Core::Axis::Z]);
// }
//
// inline TData& GetPixelReference(int x, int y, int z) const
// {
// int localXIndex = x - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::X];
// int localYIndex = y - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Y];
// int localZIndex = z - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
//
// // The first dimension is z. It's no mistake. 2D images have row-major order (as used in C++)
// // but adding them together makes z axis the major one.
// return dataArray[localZIndex][localXIndex][localYIndex];
// }
inline void SetPixel(const Core::DiscreteSpatialVector& position, TData value)
{
SetPixel(position[Core::Axis::X], position[Core::Axis::Y], position[Core::Axis::Z], value);
}
inline void SetPixel(int x, int y, int z, TData value)
{
imageChanged = true;
int localXIndex = x - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::X];
int localYIndex = y - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Y];
int localZIndex = z - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
dataArray[localZIndex][localXIndex][localYIndex] = value;
}
// Saves the current active area to disk, loads the new active area from disk. If there are no images for the new active area, initializes the missing values with zeros
OVERRIDE void ChangeActiveArea(int activeAreaIndex)
{
Core::Path::EnsureDirectory(workingPath);
const Model::ActiveArea& newActiveArea = activeAreas->at(activeAreaIndex);
// If the current active area covers the entire image, return
if (activeAreaLoaded && currentActiveArea.boxWithMargins.boxSize == config->imageSize)
{
return;
}
// If the new active area is the same as the old one
if (activeAreaLoaded && newActiveArea.boxWithMargins == currentActiveArea.boxWithMargins)
{
return;
}
// Save the current active area to the disk, if necessary
if (activeAreaLoaded && imageChanged)
{
WriteCurrentActiveAreaSafe();
}
imageChanged = false;
ReindexMemory(currentActiveArea, newActiveArea);
currentActiveArea = newActiveArea;
ReadCurrentActiveAreaSafe();
activeAreaLoaded = true;
}
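// Usage sketch (hypothetical caller; assumes 'config', 'path' and 'areas'
// are prepared elsewhere): process the volume one active area at a time.
//
//   Model::DataArray<unsigned char> data;
//   data.Initialize(config, path, areas);
//   for (int areaIndex = 0; areaIndex < (int)areas.size(); ++areaIndex)
//   {
//       data.ChangeActiveArea(areaIndex); // saves the previous area, loads this one
//       // ... GetPixel / SetPixel within the area's boxWithMargins ...
//   }
//   data.WriteCurrentActiveArea(); // flush the last area to disk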
// Writes current active area to disk, if necessary
OVERRIDE void WriteCurrentActiveArea() const
{
if (activeAreaLoaded && imageChanged)
{
Core::Path::EnsureDirectory(workingPath);
WriteCurrentActiveAreaSafe();
}
}
OVERRIDE void Clear()
{
if (dataArray != NULL)
{
Core::MemoryUtility::Free3DArray(dataArray);
dataArray = NULL;
}
}
private:
void ReadCurrentActiveAreaSafe()
{
// Load the next active area from disk
int startImageIndex = currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
int endImageIndex = currentActiveArea.boxWithMargins.exclusiveRightCorner[Core::Axis::Z];
printf("Reading images %d - %d from folder %s...\n", startImageIndex + 1, endImageIndex, workingPath.c_str());
#pragma omp parallel for schedule(static)
for (int i = startImageIndex; i < endImageIndex; ++i)
{
if ((i - startImageIndex + 1) % 10 == 0)
{
printf("Reading image %d / %d...\n", i - startImageIndex + 1, endImageIndex - startImageIndex);
}
std::string imagePath = imageFilePaths[i];
if (!Core::Path::Exists(imagePath))
{
// Copy image from the initial folder path (to preserve all the metadata)
Core::Path::CopyFile(initialImageFilePaths[i], imagePath);
// Prepare the image and fill in the entire image with default values
ResizeAndResetImage(imagePath, defaultValue);
// Fill in the image in memory with default values
// NOTE: this could also go through the CopySubImage path instead, but that is a little slower
int localZIndex = i - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
int elementsCount = currentActiveArea.boxWithMargins.boxSize[Core::Axis::X] * currentActiveArea.boxWithMargins.boxSize[Core::Axis::Y];
TData* valuesArray = dataArray[localZIndex][0]; // NOTE: I know that Allocate3DArray allocates the data in last dimension sequentially
std::fill(valuesArray, valuesArray + elementsCount, defaultValue);
}
else
{
Rectangle imageRectangle(currentActiveArea.boxWithMargins);
TData** currentImageActiveArea = GetCurrentImageActiveArea(i);
LoadRectangle(imagePath, imageRectangle, currentImageActiveArea);
}
}
}
void WriteCurrentActiveAreaSafe() const
{
int startImageIndex = currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
int endImageIndex = currentActiveArea.boxWithMargins.exclusiveRightCorner[Core::Axis::Z];
printf("Writing images %d - %d to folder %s...\n", startImageIndex + 1, endImageIndex, workingPath.c_str());
#pragma omp parallel for schedule(static)
for (int i = startImageIndex; i < endImageIndex; ++i)
{
if ((i - startImageIndex + 1) % 10 == 0)
{
printf("Writing image %d / %d...\n", i - startImageIndex + 1, endImageIndex - startImageIndex);
}
std::string imagePath = imageFilePaths[i];
if (!Core::Path::Exists(imagePath))
{
// Copy an initial image from the initial images folder
Core::Path::CopyFile(initialImageFilePaths[i], imageFilePaths[i]);
}
Rectangle imageRectangle(currentActiveArea.boxWithMargins);
TData** currentImageActiveArea = GetCurrentImageActiveArea(i);
SaveRectangle(imagePath, imageRectangle, currentImageActiveArea);
}
}
void AllocateMemory()
{
// The first dimension is z. It's no mistake. 2D images have row-major order (as used in C++)
// but adding them together makes z axis the major one.
size_t maxZSize = 0;
size_t maxZXSize = 0;
size_t maxActiveAreaSize = 0;
// Find max active area size, allocate memory
for (size_t i = 0; i < activeAreas->size(); ++i)
{
const Core::DiscreteSpatialVector& boxSize = activeAreas->at(i).boxWithMargins.boxSize;
size_t currentAreaSize = Core::VectorUtilities::GetProductGeneric<Core::DiscreteSpatialVector, size_t>(boxSize);
if (currentAreaSize > maxActiveAreaSize)
{
maxActiveAreaSize = currentAreaSize;
}
size_t currentZXSize = static_cast<size_t>(boxSize[Core::Axis::Z]) * boxSize[Core::Axis::X];
if (currentZXSize > maxZXSize)
{
maxZXSize = currentZXSize;
}
if (static_cast<size_t>(boxSize[Core::Axis::Z]) > maxZSize)
{
maxZSize = boxSize[Core::Axis::Z];
}
}
dataArray = new TData** [maxZSize];
dataArray[0] = new TData* [maxZXSize];
dataArray[0][0] = new TData[maxActiveAreaSize];
}
void ResizeAndResetImage(std::string imagePath, TData defaultValue) const
{
Image<TData> currentImage;
currentImage.Load(imagePath);
currentImage.EnsureBitsPerPixel();
currentImage.ResizeIfNecessary(config->imageSize[Core::Axis::X], config->imageSize[Core::Axis::Y]);
currentImage.Fill(defaultValue);
currentImage.Save(imagePath);
}
void LoadRectangle(std::string imagePath, const Model::Rectangle& rectangle, TData** values) const
{
Image<TData> currentImage;
currentImage.Load(imagePath);
currentImage.CheckBitsPerPixel();
currentImage.CheckSize(config->imageSize[Core::Axis::X], config->imageSize[Core::Axis::Y]);
currentImage.ReadRectangle(rectangle, values);
}
void SaveRectangle(std::string imagePath, const Model::Rectangle& rectangle, TData** values) const
{
Image<TData> currentImage;
currentImage.Load(imagePath);
currentImage.EnsureBitsPerPixel();
currentImage.ResizeIfNecessary(config->imageSize[Core::Axis::X], config->imageSize[Core::Axis::Y]);
currentImage.WriteRectangle(rectangle, values);
currentImage.Save(imagePath);
}
TData** GetCurrentImageActiveArea(int zIndex) const
{
int localZIndex = zIndex - currentActiveArea.boxWithMargins.leftCorner[Core::Axis::Z];
TData** currentImageActiveArea = dataArray[localZIndex];
return currentImageActiveArea;
}
void ReindexMemory(const ActiveArea& previousActiveArea, const ActiveArea& currentActiveArea)
{
bool existingImageSizeChanges = activeAreaLoaded && (previousActiveArea.boxWithMargins.boxSize != currentActiveArea.boxWithMargins.boxSize);
if (!activeAreaLoaded || existingImageSizeChanges)
{
const Core::DiscreteSpatialVector& boxSize = currentActiveArea.boxWithMargins.boxSize;
// The first dimension is z. It's no mistake. 2D images have row-major order (as used in C++)
// but adding them together makes z axis the major one.
Core::MemoryUtility::ReindexMemory<TData>(boxSize[2], boxSize[0], boxSize[1], dataArray);
}
}
void FillImagePaths()
{
// read final images
Services::Serializer::FillImagePaths(workingPath, &imageFilePaths);
// if their count is OK
size_t expectedImageCount = static_cast<size_t>(config->imageSize[Core::Axis::Z]);
if (imageFilePaths.size() == expectedImageCount)
{
// copy them to the initialImageFilePaths
initialImageFilePaths.resize(expectedImageCount);
Core::StlUtilities::Copy(imageFilePaths, &initialImageFilePaths);
}
// if it is zero
else if (imageFilePaths.size() == 0)
{
// read initial images
std::string initialImagesFolder = Core::Path::Append(config->baseFolder, INITIAL_IMAGES_FOLDER_NAME);
Services::Serializer::FillImagePaths(initialImagesFolder, &initialImageFilePaths);
// if their count is not OK, throw exception
if (initialImageFilePaths.size() != expectedImageCount)
{
// throw exception
throw Core::InvalidOperationException("Working folder contains no images and initial folder contains wrong number of images");
}
// copy them to the image path, change the folder
ChangeFolder(initialImagesFolder, workingPath, initialImageFilePaths, &imageFilePaths);
}
// if it is not zero
else
{
// throw exception
throw Core::InvalidOperationException("Number of images is incorrect: it is neither zero nor the expected number of images");
}
}
void ChangeFolder(std::string sourceFolder, std::string targetFolder, const std::vector<std::string>& sourcePaths, std::vector<std::string>* targetPaths) const
{
std::vector<std::string>& targetPathsRef = *targetPaths;
targetPathsRef.resize(sourcePaths.size());
for (size_t i = 0; i < targetPathsRef.size(); ++i)
{
std::string sourcePath = sourcePaths[i];
std::string targetName = Core::Path::GetFileName(sourcePath);
targetPathsRef[i] = Core::Path::Append(targetFolder, targetName);
}
}
DISALLOW_COPY_AND_ASSIGN(DataArray);
};
}
#endif /* ImageProcessing_Model_Headers_DataArray_h */
|
VectorReduction.h | ////////////////////////////////////////////////////////////////////////////////
// //
// File Name: VectorReduction.h //
// //
// Author: Andreas Buttenschoen <andreas@buttenschoen.ca> //
// Created: 2018-03-30 12:22:20 //
// //
////////////////////////////////////////////////////////////////////////////////
#ifndef CS_VECTOR_REDUCTION_H
#define CS_VECTOR_REDUCTION_H
#include <iostream>
#include <cmath>
using std::abs;
using std::max;
template <typename T>
T zero(T value)
{
return T(0);
}
struct one_norm_functor
{
template <typename Value>
static inline void init(Value& value)
{
value = zero(value);
}
template <typename Value, typename Element>
static inline void update(Value& value, const Element& x)
{
value += abs(x);
}
template <typename Value>
static inline void finish(Value& value, const Value& value2)
{
value += value2;
}
template <typename Value>
static inline Value post_reduction(const Value& value)
{
return value;
}
};
struct sum_functor
{
template <typename Value>
static inline void init(Value& value)
{
value = zero(value);
}
template <typename Value, typename Element>
static inline void update(Value& value, const Element& x)
{
value += x;
}
template <typename Value>
static inline void finish(Value& value, const Value& value2)
{
value += value2;
}
template <typename Value>
static inline Value post_reduction(const Value& value)
{
return value;
}
};
struct product_functor
{
template <typename Value>
static inline void init(Value& value)
{
value = Value(1); // multiplicative identity: initializing to zero would annihilate the product
}
template <typename Value, typename Element>
static inline void update(Value& value, const Element& x)
{
value *= x;
}
template <typename Value>
static inline void finish(Value& value, const Value& value2)
{
value *= value2;
}
template <typename Value>
static inline Value post_reduction(const Value& value)
{
return value;
}
};
struct two_norm_functor
{
template <typename Value>
static inline void init(Value& value)
{
value = zero(value);
}
template <typename Value, typename Element>
static inline void update(Value& value, const Element& x)
{
value += x * x;
}
template <typename Value>
static inline void finish(Value& value, const Value& value2)
{
value += value2;
}
template <typename Value>
static inline Value post_reduction(const Value& value)
{
Value (*sqrt) (const Value) = std::sqrt;
return sqrt(value);
}
};
struct unary_dot : two_norm_functor
{
template <typename Value>
static inline Value post_reduction(const Value& value)
{
return value;
}
};
struct infinity_norm_functor
{
template <typename Value>
static inline void init(Value& value)
{
value = zero(value);
}
template <typename Value, typename Element>
static inline void update(Value& value, const Element& x)
{
value = max(value, abs(x));
}
template <typename Value>
static inline void finish(Value& value, const Value& value2)
{
value = max(value, abs(value2));
}
template <typename Value>
static inline Value post_reduction(const Value& value)
{
return value;
}
};
namespace impl {
template <unsigned long Index0, unsigned long Max0, typename Functor>
struct reduction
{
using next = reduction<Index0 + 1, Max0, Functor>;
template <typename Value>
static inline void init(Value& tmp00, Value& tmp01, Value& tmp02, Value& tmp03, Value& tmp04, Value& tmp05, Value& tmp06, Value& tmp07)
{
Functor::init(tmp00);
next::init(tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, tmp00);
}
template <typename Value, typename Vector, typename Size>
static inline void update(Value& tmp00, Value& tmp01, Value& tmp02, Value& tmp03, Value& tmp04, Value& tmp05, Value& tmp06, Value& tmp07, const Vector& v, Size i)
{
Functor::update(tmp00, v[i + Index0]);
next::update(tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, tmp00, v, i);
}
template <typename Value>
static inline void finish(Value& tmp00, Value& tmp01, Value& tmp02, Value& tmp03, Value& tmp04, Value& tmp05, Value& tmp06, Value& tmp07)
{
next::finish(tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, tmp00);
Functor::finish(tmp00, tmp01);
}
};
template <unsigned long Max0, typename Functor>
struct reduction<Max0, Max0, Functor>
{
template <typename Value>
static inline void init(Value& tmp00, Value&, Value&, Value&, Value&, Value&, Value&, Value&)
{
Functor::init(tmp00);
}
template <typename Value, typename Vector, typename Size>
static inline void update(Value& tmp00, Value&, Value&, Value&, Value&, Value&, Value&, Value&, const Vector& v, Size i)
{
Functor::update(tmp00, v[i + Max0]);
}
template <typename Value>
static inline void finish(Value&, Value&, Value&, Value&, Value&, Value&, Value&, Value&)
{}
};
} // end namespace
template <unsigned long Unroll, typename Functor, typename Result>
struct reduction
{
template <typename Vector>
static inline Result apply(const Vector& v)
{
//using value_type = typename Vector::value_type;
using size_type = typename Vector::size_type;
Result result;
Result tmp00, tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07;
constexpr size_type UNROLL = std::min(Unroll, size_type(8));
auto s = size(v);
auto sb = s / UNROLL * UNROLL;
Functor::init(result);
//#pragma omp parallel
{
impl::reduction<0, UNROLL-1, Functor>::init(tmp00, tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07);
//#pragma omp for
for (size_t i = 0; i < sb; i+=UNROLL)
impl::reduction<0, UNROLL-1, Functor>::update(tmp00, tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, v, i);
impl::reduction<0, UNROLL-1, Functor>::finish(tmp00, tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07);
//#pragma omp critical
Functor::finish(result, tmp00);
}
for (size_t i = sb; i < s; i++)
Functor::update(result, v[i]);
return Functor::post_reduction(result);
}
};
namespace impl {
template <unsigned long Index0, unsigned long Max0>
struct dot_aux
{
using next = dot_aux<Index0 + 1, Max0>;
template <typename Value, typename Vector1, typename Vector2, typename Size>
static inline void apply(Value& tmp00, Value& tmp01, Value& tmp02, Value& tmp03, Value& tmp04, Value& tmp05, Value& tmp06, Value& tmp07, const Vector1& v1, const Vector2& v2, Size i)
{
tmp00 = std::fma(v1[i + Index0], v2[i + Index0], tmp00);
next::apply(tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, tmp00, v1, v2, i);
}
};
template <unsigned long Max0>
struct dot_aux<Max0, Max0>
{
template <typename Value, typename Vector1, typename Vector2, typename Size>
static inline void apply(Value& tmp00, Value&, Value&, Value&, Value&, Value&, Value&, Value&, const Vector1& v1, const Vector2& v2, Size i)
{
tmp00 = std::fma(v1[i + Max0], v2[i + Max0], tmp00);
}
};
template <unsigned long Unroll>
struct dot
{
template <typename Vector1, typename Vector2>
static inline auto apply(const Vector1& v1, const Vector2& v2)
{
using value_type = typename Vector1::value_type;
using size_type = typename Vector1::size_type;
value_type z = value_type(0);
value_type result = z;
constexpr size_type UNROLL = std::min(Unroll, size_type(8));
const size_type N = size(v1);
const size_type no_loops = N / UNROLL;
//#pragma omp parallel
{
value_type tmp00 = z, tmp01 = z, tmp02 = z, tmp03 = z, tmp04 = z, tmp05 = z, tmp06 = z, tmp07 = z;
//#pragma omp for
for (size_type i = 0; i < no_loops * UNROLL; i += UNROLL)
dot_aux<0, UNROLL-1>::apply(tmp00, tmp01, tmp02, tmp03, tmp04, tmp05, tmp06, tmp07, v1, v2, i);
//#pragma omp critical
result += ((tmp00 + tmp01) + (tmp02 + tmp03)) + ((tmp04 + tmp05) + (tmp06 + tmp07));
}
for (size_type i = UNROLL * no_loops; i < N; i++)
result = std::fma(v1[i], v2[i], result);
return result;
}
};
} // end namespace
template <unsigned long Unroll, typename Vector1, typename Vector2>
inline auto dot(const Vector1& v1, const Vector2& v2)
{
return impl::dot<Unroll>::apply(v1, v2);
}
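// Usage sketch (illustrative; assumes a container such as std::vector<double>,
// whose value_type/size_type/operator[] satisfy the templates above and for
// which an unqualified size(v) call resolves, e.g. std::size in C++17):
//
//   std::vector<double> v{3.0, 4.0};
//   double n2  = reduction<4, two_norm_functor, double>::apply(v); // 5.0
//   double sum = reduction<4, sum_functor, double>::apply(v);      // 7.0
//   double d   = dot<4>(v, v);                                     // 25.0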
#endif
|
hybrid_report_mask.c | /* Routine reports hybrid affinity information for MPI processes
within an OpenMP region.
Within a parallel region--
Rank 0 gathers thread affinities from each rank & reports.
a.) Within a master region:
Determine maximum length of node name.
Gather node names from each rank.
Determine if there are multiple compute nodes.
Each mpi process (rank) creates static space:
for gathering nodes names and length,
for collecting affinity masks of all threads
within the mpi process (omp_proc_mask).
b.) Within the parallel region:
Determine the mask for the thread (insert into omp_proc_mask)
c.) Within master region:
if rank 0
print mask header
print masks for rank 0
gather masks from non-rank-0 MPI processes (MPI_Irecv).
loop over non-rank-0 processes and print masks (for each thread)
if rank != 0
pack omp_proc_mask mask into omp_mask_pac and send to rank 0.
Free static spaces
Return
*/
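/* Typical call site (a sketch; per the checks below, MPI must already be
   initialized and the call is expected inside an OpenMP parallel region):

     MPI_Init(&argc, &argv);
     #pragma omp parallel
     amask_hybrid();
     MPI_Finalize();
*/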
#include <stdio.h>
#include <mpi.h>
#include <omp.h>
#include <sched.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include <cstdlib>
#include "opts.h"
// basic mask printer-- prints a single row with ncpus number of elements
void print_mask(int hd_prnt, char* name, int multi_node, int rank, int thrd, int ncpus, int nranks, int nthrds, int *proc_mask, int tpc, char v);
int boundto(int* nelements_set, int* int_mask);
int get_threads_per_node();
int amask_hybrid(){
// General
int i,j,ierr;
int id, rid,tid;
int in_mpi, in_omp;
int thrd, nthrds;
int ncpus, nel_set;
// Mask storage
static int ** omp_proc_mask;
static int * omp_mask_pac;
char *dummy = NULL; // unused name argument for header/footer print_mask calls
// MPI specific Variables
int rank, nranks;
MPI_Request *request;
MPI_Status *status;
static int multi_node = 0;
static char *all_names;
static int max_name_len;
int name_len;
char proc_name[MPI_MAX_PROCESSOR_NAME];
char v,p;
int tpc; // hwthreads/core
Maskopts opts;
// get print speed fast or slow (f|s); listing cores or SMT (c|s)
p = opts.get_p();
v = opts.get_v();
tpc=get_threads_per_node();
// In MPI and parallel region ?
MPI_Initialized(&in_mpi);
in_omp = omp_in_parallel();
if(in_mpi == 0){
printf("ERROR: ***** Must call amask_hybrid() in MPI program. ***** \n");
exit(1);
}
// Get rank number & no of ranks via MPI
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nranks);
if(in_omp == 0){
if(rank == 0){
printf(" ***** When using 1 thread, Intel OpenMP MAY report "
"\"not in a parallel region\" (Uh!)***** \n");
printf(" ***** Each row will only have a rank number (no \"0\" thread_id). \n");
printf("WARNING: ***** Unspecified results if amask_hybrid "
"not called in parallel region of MPI code section. ***** \n");
}
}
thrd = omp_get_thread_num(); // thread id
nthrds = omp_get_num_threads(); // Number of Threads
// Get number of cpus (this gives no.
// of cpu_ids in /proc/cpuinfo)
ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN);
// Working only with MPI processes (masters)
#pragma omp master
{
// Get a list of nodes from all ranks.
MPI_Get_processor_name(proc_name,&name_len);
MPI_Allreduce(&name_len, &max_name_len, 1,MPI_INT, MPI_MAX, MPI_COMM_WORLD);
all_names = (char *) malloc(sizeof(char)*nranks*(max_name_len+1));
MPI_Gather( proc_name, max_name_len+1 , MPI_CHAR,
all_names, max_name_len+1, MPI_CHAR,
0, MPI_COMM_WORLD);
// If multiple nodes, make multi_node non-zero.
if(rank == 0){
for(id=0;id<nranks;id++){
if( strcmp(&all_names[id*(max_name_len+1)],&all_names[0]) ) multi_node++; }
}
// Create shared storage for masks (only master allocates)
omp_proc_mask = (int **) malloc(sizeof(int*)*nthrds);
for(i=0;i<nthrds;i++) omp_proc_mask[i] = (int * ) malloc(sizeof(int )*ncpus );
for(i=0;i<nthrds;i++) for(j=0;j<ncpus;j++) omp_proc_mask[i][j] =0;
}
#pragma omp barrier
#pragma omp critical // (boundto -- may not be thread safe)
ierr = boundto(&nel_set,omp_proc_mask[thrd]);
#pragma omp barrier
#pragma omp master
{
omp_mask_pac = (int *) malloc(sizeof(int)*nranks*nthrds*ncpus); // need packing space for mpi send/recv
if(rank == 0){
request = (MPI_Request *) malloc(sizeof(MPI_Request)*nranks);
status = (MPI_Status *) malloc(sizeof(MPI_Status )*nranks);
print_mask(1, dummy, multi_node, 0, 0, ncpus, nranks,nthrds, omp_proc_mask[0],tpc,v); //print header
fflush(stdout);
for(tid=0;tid<nthrds;tid++){
//print_mask(0, &all_names[tid*(max_name_len+1)], multi_node, 0,tid, ncpus, nranks,nthrds, omp_proc_mask[tid],tpc,v);
print_mask(0, &all_names[ 0 ], multi_node, 0,tid, ncpus, nranks,nthrds, omp_proc_mask[tid],tpc,v);
}
fflush(stdout);
for(rid=1;rid<nranks;rid++){ // Receive other rank's packed mask arrays
MPI_Irecv(&omp_mask_pac[rid*nthrds*ncpus], nthrds*ncpus, MPI_INT, rid, 99, MPI_COMM_WORLD, &request[rid-1]);
}
MPI_Waitall(nranks-1,&request[0],&status[0]);
for(rid=1;rid<nranks;rid++){ // Print for each rank
for(tid=0;tid<nthrds;tid++){
//print_mask(0, &all_names[tid*(max_name_len+1)], multi_node, rid,tid, ncpus, nranks,nthrds, &omp_mask_pac[rid*nthrds*ncpus + tid*ncpus],tpc,v);
print_mask(0, &all_names[rid*(max_name_len+1)], multi_node, rid,tid, ncpus, nranks,nthrds, &omp_mask_pac[rid*nthrds*ncpus + tid*ncpus],tpc,v);
if(p == 's') ierr=usleep(300000);
}
}
if(nranks*nthrds > 50)
print_mask(2, dummy, multi_node, 0, 0, ncpus, nranks,nthrds, omp_proc_mask[0],tpc,v); //print header
fflush(stdout);
} // end root printing
else{ //all non-root ranks
// Pack up the ranks' mask arrays (Uh, should have made one array from beginning!)
for( tid=0;tid<nthrds;tid++){
for( id=0; id<ncpus; id++) omp_mask_pac[(tid*ncpus)+id] = omp_proc_mask[tid][id];
if(p == 's') ierr=usleep(300000);
}
// Send to root
MPI_Send(omp_mask_pac, nthrds*ncpus, MPI_INT, 0, 99, MPI_COMM_WORLD);
} // end non-root printing
// Return allocated space
for(i=0;i<nthrds;i++) free(omp_proc_mask[i]);
free(omp_proc_mask);
free(omp_mask_pac);
if(rank == 0 ){ free(request); free(status);}
free(all_names);
} // end of Master
#pragma omp barrier // JIC, so that all threads leave at the same time.
return 0;
}
void amask_hybrid_(){ (void) amask_hybrid(); }
|
GB_unop__identity_uint16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_int16)
// op(A') function: GB (_unop_tran__identity_uint16_int16)
// C type: uint16_t
// A type: int16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint16_int16)
(
uint16_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint16_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
interpolate_op.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
namespace paddle {
namespace operators {
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
inline std::vector<int> get_new_shape(
const std::vector<const Tensor*>& list_new_shape_tensor) {
// get the target shape value from each element of the tensor list
std::vector<int> vec_new_shape;
for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
auto tensor = list_new_shape_tensor[i];
PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}),
"shape of dim tensor should be [1]");
if (platform::is_gpu_place(tensor->place())) {
framework::Tensor temp;
TensorCopySync(*tensor, platform::CPUPlace(), &temp);
vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
} else {
vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
}
}
return vec_new_shape;
}
template <typename T>
inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
std::vector<T> vec_new_data;
auto* new_data = new_data_tensor->data<T>();
framework::Tensor cpu_starts_tensor;
if (platform::is_gpu_place(new_data_tensor->place())) {
TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor);
new_data = cpu_starts_tensor.data<T>();
}
vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel());
return vec_new_data;
}
template <typename T>
static void NearestNeighborInterpolate(const Tensor& input, Tensor* output,
const float ratio_h, const float ratio_w,
const int n, const int c,
const int out_h, const int out_w,
const bool align_corners) {
auto input_t = EigenTensor<T, 4>::From(input);
auto output_t = EigenTensor<T, 4>::From(*output);
for (int k = 0; k < out_h; k++) { // loop for images
int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5)
: static_cast<int>(ratio_h * k);
for (int l = 0; l < out_w; l++) {
int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5)
: static_cast<int>(ratio_w * l);
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
output_t(i, j, k, l) = input_t(i, j, in_k, in_l);
}
}
}
}
}
template <typename T>
static void BilinearInterpolation(const Tensor& input, Tensor* output,
const float ratio_h, const float ratio_w,
const int in_h, const int in_w, const int n,
const int c, const int out_h, const int out_w,
const bool align_corners,
const bool align_mode) {
auto input_t = EigenTensor<T, 4>::From(input);
auto output_t = EigenTensor<T, 4>::From(*output);
bool align_flag = (align_mode == 0 && !align_corners);
std::vector<int> vy_n, vy_s;
std::vector<float> vd_n, vd_s;
// resize (not reserve): elements are assigned via operator[] in the loops below
vy_n.resize(out_h);
vy_s.resize(out_h);
vd_n.resize(out_h);
vd_s.resize(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int k = 0; k < out_h; k++) {
int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
: static_cast<int>(ratio_h * k);
y_n = (y_n > 0) ? y_n : 0;
int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
float idx_src_y = ratio_h * (k + 0.5) - 0.5;
idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
float d_s = 1.f - d_n;
{
vy_n[k] = y_n;
vy_s[k] = y_s;
vd_n[k] = d_n;
vd_s[k] = d_s;
}
}
std::vector<int> vx_w, vx_e;
std::vector<float> vd_w, vd_e;
vx_w.resize(out_w);
vx_e.resize(out_w);
vd_w.resize(out_w);
vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int l = 0; l < out_w; l++) {
int x_w = (align_mode == 0 && !align_corners)
? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0;
int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
float d_e = 1.f - d_w;
{
vx_w[l] = x_w;
vx_e[l] = x_e;
vd_w[l] = d_w;
vd_e[l] = d_e;
}
}
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
for (int k = 0; k < out_h; k++) { // loop for images
for (int l = 0; l < out_w; l++) {
// bilinear interpolation
T out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] +
input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] +
input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] +
input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l];
output_t(i, j, k, l) = out_t;
}
}
}
}
}
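// Note (illustrative): the four weights applied above sum to one, since
// d_s = 1 - d_n and d_e = 1 - d_w give
//   d_s*d_e + d_n*d_e + d_s*d_w + d_n*d_w = (d_s + d_n) * (d_e + d_w) = 1,
// so bilinear interpolation reproduces constant inputs exactly.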
template <typename T>
static void TrilinearInterpolation(
const Tensor& input, Tensor* output, const float ratio_d,
const float ratio_h, const float ratio_w, const int in_d, const int in_h,
const int in_w, const int n, const int c, const int out_d, const int out_h,
const int out_w, const bool align_corners, const bool align_mode) {
auto input_t = EigenTensor<T, 5>::From(input);
auto output_t = EigenTensor<T, 5>::From(*output);
bool align_flag = (align_mode == 0 && !align_corners);
std::vector<int> vt_f, vt_b;
std::vector<float> vd_f, vd_b;
vt_f.resize(out_d);
vt_b.resize(out_d);
vd_f.resize(out_d);
vd_b.resize(out_d);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int j = 0; j < out_d; j++) {
int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
: static_cast<int>(ratio_d * j);
t_f = (t_f > 0) ? t_f : 0;
int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
float idx_src_t = ratio_d * (j + 0.5) - 0.5;
idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
float d_b = 1.f - d_f;
{
vt_f[j] = t_f;
vt_b[j] = t_b;
vd_f[j] = d_f;
vd_b[j] = d_b;
}
}
std::vector<int> vy_n, vy_s;
std::vector<float> vd_n, vd_s;
vy_n.resize(out_h);
vy_s.resize(out_h);
vd_n.resize(out_h);
vd_s.resize(out_h);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int k = 0; k < out_h; k++) {
int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
: static_cast<int>(ratio_h * k);
y_n = (y_n > 0) ? y_n : 0;
int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
float idx_src_y = ratio_h * (k + 0.5) - 0.5;
idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
float d_s = 1.f - d_n;
{
vy_n[k] = y_n;
vy_s[k] = y_s;
vd_n[k] = d_n;
vd_s[k] = d_s;
}
}
std::vector<int> vx_w, vx_e;
std::vector<float> vd_w, vd_e;
vx_w.resize(out_w);
vx_e.resize(out_w);
vd_w.resize(out_w);
vd_e.resize(out_w);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int l = 0; l < out_w; l++) {
int x_w = (align_mode == 0 && !align_corners)
? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0;
int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
float d_e = 1.f - d_w;
{
vx_w[l] = x_w;
vx_e[l] = x_e;
vd_w[l] = d_w;
vd_e[l] = d_e;
}
}
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(5)
#endif
for (int b = 0; b < n; b++) { // loop for batches
for (int i = 0; i < c; i++) { // loop for channels
for (int j = 0; j < out_d; j++) { // loop for D, H, W
for (int k = 0; k < out_h; k++) {
for (int l = 0; l < out_w; l++) {
// trilinear interpolation
T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] *
vd_s[k] * vd_e[l] +
input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] *
vd_s[k] * vd_w[l] +
input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] *
vd_n[k] * vd_e[l] +
input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] *
vd_n[k] * vd_w[l] +
input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] *
vd_s[k] * vd_e[l] +
input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] *
vd_s[k] * vd_w[l] +
input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] *
vd_n[k] * vd_e[l] +
input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] *
vd_n[k] * vd_w[l];
output_t(b, i, j, k, l) = out_t;
}
}
}
}
}
}
template <typename T>
static void NearestNeighborInterpolateGrad(
const Tensor& output_grad, Tensor* input_grad, const float ratio_h,
const float ratio_w, const int n, const int c, const int out_h,
const int out_w, const bool align_corners) {
auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
for (int k = 0; k < out_h; k++) { // loop for images
int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5)
: static_cast<int>(ratio_h * k);
for (int l = 0; l < out_w; l++) {
int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5)
: static_cast<int>(ratio_w * l);
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l);
}
}
}
}
}
template <typename T>
static void BilinearInterpolationGrad(const Tensor& output_grad,
Tensor* input_grad, const float ratio_h,
const float ratio_w, const int in_h,
const int in_w, const int n, const int c,
const int out_h, const int out_w,
const bool align_corners,
const int align_mode) {
auto input_grad_t = EigenTensor<T, 4>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 4>::From(output_grad);
bool align_flag = (align_mode == 0 && !align_corners);
for (int k = 0; k < out_h; k++) { // loop for images
int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
: static_cast<int>(ratio_h * k);
y_n = (y_n > 0) ? y_n : 0;
int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
float idx_src_y = ratio_h * (k + 0.5) - 0.5;
idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
float d_s = 1.f - d_n;
for (int l = 0; l < out_w; l++) {
int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0;
int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
float d_e = 1.f - d_w;
for (int i = 0; i < n; i++) { // loop for batches
for (int j = 0; j < c; j++) { // loop for channels
// bilinear interpolation grad
const T grad = output_grad_t(i, j, k, l);
input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e);
input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e);
input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w);
input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w);
}
}
}
}
}
template <typename T>
static void TrilinearInterpolationGrad(
const Tensor& output_grad, Tensor* input_grad, const float ratio_d,
const float ratio_h, const float ratio_w, const int in_d, const int in_h,
const int in_w, const int n, const int c, const int out_d, const int out_h,
const int out_w, const bool align_corners, const int align_mode) {
auto input_grad_t = EigenTensor<T, 5>::From(*input_grad);
auto output_grad_t = EigenTensor<T, 5>::From(output_grad);
bool align_flag = (align_mode == 0 && !align_corners);
for (int j = 0; j < out_d; j++) { // loop for D
int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5)
: static_cast<int>(ratio_d * j);
t_f = (t_f > 0) ? t_f : 0;
int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1);
float idx_src_t = ratio_d * (j + 0.5) - 0.5;
idx_src_t = (idx_src_t > 0) ? idx_src_t : 0;
float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f;
float d_b = 1.f - d_f;
for (int k = 0; k < out_h; k++) { // loop for H
int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5)
: static_cast<int>(ratio_h * k);
y_n = (y_n > 0) ? y_n : 0;
int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1);
float idx_src_y = ratio_h * (k + 0.5) - 0.5;
idx_src_y = (idx_src_y > 0) ? idx_src_y : 0;
float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n;
float d_s = 1.f - d_n;
for (int l = 0; l < out_w; l++) { // loop for W
int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5)
: static_cast<int>(ratio_w * l);
x_w = (x_w > 0) ? x_w : 0;
int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1);
float idx_src_x = ratio_w * (l + 0.5) - 0.5;
idx_src_x = (idx_src_x > 0) ? idx_src_x : 0;
float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w;
float d_e = 1.f - d_w;
for (int b = 0; b < n; b++) { // loop for batches
for (int i = 0; i < c; i++) { // loop for channels
// trilinear interpolation grad
const T grad = output_grad_t(b, i, j, k, l);
input_grad_t(b, i, t_f, y_n, x_w) +=
static_cast<T>(grad * d_b * d_s * d_e);
input_grad_t(b, i, t_f, y_n, x_e) +=
static_cast<T>(grad * d_b * d_s * d_w);
input_grad_t(b, i, t_f, y_s, x_w) +=
static_cast<T>(grad * d_b * d_n * d_e);
input_grad_t(b, i, t_f, y_s, x_e) +=
static_cast<T>(grad * d_b * d_n * d_w);
input_grad_t(b, i, t_b, y_n, x_w) +=
static_cast<T>(grad * d_f * d_s * d_e);
input_grad_t(b, i, t_b, y_n, x_e) +=
static_cast<T>(grad * d_f * d_s * d_w);
input_grad_t(b, i, t_b, y_s, x_w) +=
static_cast<T>(grad * d_f * d_n * d_e);
input_grad_t(b, i, t_b, y_s, x_e) +=
static_cast<T>(grad * d_f * d_n * d_w);
}
}
}
}
}
}
template <typename T>
static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx,
const Tensor& input, Tensor* output) {
const int n = input.dims()[0];
const int c = input.dims()[1];
const int in_h = input.dims()[2];
const int in_w = input.dims()[3];
auto interp_method = ctx.Attr<std::string>("interp_method");
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_size_tensor);
out_h = new_size[0];
out_w = new_size[1];
} else {
float scale;
auto scale_tensor = ctx.Input<Tensor>("Scale");
if (scale_tensor != nullptr) {
auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
scale = scale_data[0];
} else {
scale = ctx.Attr<float>("scale");
}
if (scale > 0) {
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
auto out_size_data = get_new_data_from_tensor<int>(out_size);
out_h = out_size_data[0];
out_w = out_size_data[1];
}
}
PADDLE_ENFORCE_GT(
out_h, 0,
"out_h in Attr(out_shape) of Op(interpolate) should be greater than 0.");
PADDLE_ENFORCE_GT(
out_w, 0,
"out_w in Attr(out_shape) of Op(interpolate) should be greater than 0.");
output->mutable_data<T>({n, c, out_h, out_w}, ctx.GetPlace());
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(input, ctx.GetPlace(), output);
return;
}
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
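// Worked example (illustrative): with in_h = 4 and out_h = 8,
// align_corners = true gives ratio_h = (4 - 1) / (8 - 1) = 3/7, so output
// row 7 maps to 3/7 * 7 = 3, the last input row; align_corners = false
// gives ratio_h = 4 / 8 = 0.5.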
if ("bilinear" == interp_method) {
BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c,
out_h, out_w, align_corners, align_mode);
} else if ("nearest" == interp_method) {
NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h,
out_w, align_corners);
}
}
template <typename T>
static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx,
const Tensor& input, Tensor* output) {
const int n = input.dims()[0];
const int c = input.dims()[1];
const int in_d = input.dims()[2];
const int in_h = input.dims()[3];
const int in_w = input.dims()[4];
auto interp_method = ctx.Attr<std::string>("interp_method");
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int out_d = ctx.Attr<int>("out_d");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
// have size tensor
auto new_size = get_new_shape(list_new_size_tensor);
out_d = new_size[0];
out_h = new_size[1];
out_w = new_size[2];
} else {
float scale;
auto scale_tensor = ctx.Input<Tensor>("Scale");
if (scale_tensor != nullptr) {
auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
scale = scale_data[0];
} else {
scale = ctx.Attr<float>("scale");
}
if (scale > 0) {
out_d = static_cast<int>(in_d * scale);
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
auto out_size_data = get_new_data_from_tensor<int>(out_size);
out_d = out_size_data[0];
out_h = out_size_data[1];
out_w = out_size_data[2];
}
}
PADDLE_ENFORCE_GT(
out_d, 0,
"out_d in Attr(out_shape) of Op(interpolate) should be greater than 0.");
PADDLE_ENFORCE_GT(
out_h, 0,
"out_h in Attr(out_shape) of Op(interpolate) should be greater than 0.");
PADDLE_ENFORCE_GT(
out_w, 0,
"out_w in Attr(out_shape) of Op(interpolate) should be greater than 0.");
output->mutable_data<T>({n, c, out_d, out_h, out_w}, ctx.GetPlace());
if (in_d == out_d && in_h == out_h && in_w == out_w) {
framework::TensorCopy(input, ctx.GetPlace(), output);
return;
}
float ratio_d = 0.f;
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(in_d) / out_d;
}
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if ("trilinear" == interp_method) {
TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d,
in_h, in_w, n, c, out_d, out_h, out_w,
align_corners, align_mode);
}
}
template <typename T>
static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx,
Tensor* input_grad, const Tensor& output_grad) {
auto* input = ctx.Input<Tensor>("X");
const int n = input->dims()[0];
const int c = input->dims()[1];
const int in_h = input->dims()[2];
const int in_w = input->dims()[3];
auto interp_method = ctx.Attr<std::string>("interp_method");
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
float scale;
auto scale_tensor = ctx.Input<Tensor>("Scale");
if (scale_tensor != nullptr) {
auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
scale = scale_data[0];
} else {
scale = ctx.Attr<float>("scale");
}
if (scale > 0) {
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
auto out_size_data = get_new_data_from_tensor<int>(out_size);
out_h = out_size_data[0];
out_w = out_size_data[1];
}
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
    // SizeTensor provided: it takes precedence over scale/OutSize
auto new_size = get_new_shape(list_new_size_tensor);
out_h = new_size[0];
out_w = new_size[1];
}
input_grad->mutable_data<T>({n, c, in_h, in_w}, ctx.GetPlace());
auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
math::SetConstant<platform::CPUDeviceContext, T> zero;
zero(device_ctx, input_grad, static_cast<T>(0.0));
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
return;
}
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if ("bilinear" == interp_method) {
BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
in_h, in_w, n, c, out_h, out_w, align_corners,
align_mode);
} else if ("nearest" == interp_method) {
NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
n, c, out_h, out_w, align_corners);
}
}
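// Note (ours): the backward pass is the adjoint of the forward
// interpolation. Each element of grad(Out) is scattered back onto the same
// source cells, with the same bilinear or nearest weights, that the forward
// pass gathered from, i.e. grad(X) = W^T * grad(Out) for the implicit
// interpolation weight matrix W applied in the forward direction.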
template <typename T>
static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad, const Tensor& output_grad) {
auto* input = ctx.Input<Tensor>("X");
const int n = input->dims()[0];
const int c = input->dims()[1];
const int in_d = input->dims()[2];
const int in_h = input->dims()[3];
const int in_w = input->dims()[4];
auto interp_method = ctx.Attr<std::string>("interp_method");
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int out_d = ctx.Attr<int>("out_d");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
float scale;
auto scale_tensor = ctx.Input<Tensor>("Scale");
if (scale_tensor != nullptr) {
auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
scale = scale_data[0];
} else {
scale = ctx.Attr<float>("scale");
}
if (scale > 0) {
out_d = static_cast<int>(in_d * scale);
out_h = static_cast<int>(in_h * scale);
out_w = static_cast<int>(in_w * scale);
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
auto out_size_data = get_new_data_from_tensor<int>(out_size);
out_d = out_size_data[0];
out_h = out_size_data[1];
out_w = out_size_data[2];
}
auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
if (list_new_size_tensor.size() > 0) {
    // SizeTensor provided: it takes precedence over scale/OutSize
auto new_size = get_new_shape(list_new_size_tensor);
out_d = new_size[0];
out_h = new_size[1];
out_w = new_size[2];
}
input_grad->mutable_data<T>({n, c, in_d, in_h, in_w}, ctx.GetPlace());
auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>();
math::SetConstant<platform::CPUDeviceContext, T> zero;
zero(device_ctx, input_grad, static_cast<T>(0.0));
if (in_d == out_d && in_h == out_h && in_w == out_w) {
framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
return;
}
float ratio_d = 0.f;
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(in_d) / out_d;
}
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if ("trilinear" == interp_method) {
TrilinearInterpolationGrad<T>(output_grad, input_grad, ratio_d, ratio_h,
ratio_w, in_d, in_h, in_w, n, c, out_d, out_h,
out_w, align_corners, align_mode);
}
}
template <typename T>
class InterpolateKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto input_dims = input->dims();
if (input_dims.size() == 4) { // 2D interpolation
Interpolate2DCPUFwd<T>(ctx, *input, output);
} else if (input_dims.size() == 5) { // 3D interpolation
Interpolate3DCPUFwd<T>(ctx, *input, output);
}
}
};
template <typename T>
class InterpolateGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto output_grad_dims = output_grad->dims();
if (output_grad_dims.size() == 4) { // 2D interpolation grad
Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad);
} else if (output_grad_dims.size() == 5) { // 3D interpolation grad
Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad);
}
}
};
} // namespace operators
} // namespace paddle
|
fgg_force.c | #ifdef FORCE
#ifdef THREE_PERIODIC
#include "SE_fgg.h"
static
int fgg_expansion_3p_force(const double x[3], const double q,
const SE_FGG_params* params,
double z2_0[P_MAX],
double z2_1[P_MAX],
double z2_2[P_MAX],
double zf_0[P_MAX],
double zf_1[P_MAX],
double zf_2[P_MAX])
{
// unpack params
const int p = params->P;
const int p_half = params->P_half;
const double h = params->h;
const double c=params->c;
double t0[3];
int idx_from[3],idx,p_from;
// compute index range and centering
if(is_odd(p))
{
for(int j=0; j<3; j++)
{
idx = (int) round(x[j]/h);
idx_from[j] = idx - p_half;
t0[j] = x[j]-h*idx;
}
}
else
{
for(int j=0; j<3; j++)
{
idx = (int) floor(x[j]/h);
idx_from[j] = idx - (p_half-1);
t0[j] = x[j]-h*idx;
}
}
// compute third factor
double z3 = exp(-c*(t0[0]*t0[0] + t0[1]*t0[1] + t0[2]*t0[2]) )*q;
// compute second factor by induction
double z_base0 = exp(2*c*h*t0[0]);
double z_base1 = exp(2*c*h*t0[1]);
double z_base2 = exp(2*c*h*t0[2]);
double z0, z1, z2;
if(is_odd(p))
{
z0 = pow(z_base0,-p_half);
z1 = pow(z_base1,-p_half);
z2 = pow(z_base2,-p_half);
p_from = -p_half;
}
else
{
z0 = pow(z_base0,-p_half+1);
z1 = pow(z_base1,-p_half+1);
z2 = pow(z_base2,-p_half+1);
p_from = -p_half+1;
}
z2_0[0] = z0;
z2_1[0] = z1;
z2_2[0] = z2;
// extra terms multiplied to calculate forces
zf_0[0] = -c*(t0[0]-p_from*h);
zf_1[0] = -c*(t0[1]-p_from*h);
zf_2[0] = -c*(t0[2]-p_from*h);
for(int i=1; i<p; i++)
{
z0 *=z_base0;
z1 *=z_base1;
z2 *=z_base2;
z2_0[i] = z0;
z2_1[i] = z1;
z2_2[i] = z2;
zf_0[i] = -c*(t0[0]-(p_from+i)*h);
zf_1[i] = -c*(t0[1]-(p_from+i)*h);
zf_2[i] = -c*(t0[2]-(p_from+i)*h);
}
// save some flops by multiplying one vector with z3 factor
for(int i=0; i<p; i++)
{
z2_0[i] *= z3;
}
return __IDX3_RMAJ(idx_from[0]+p_half,
idx_from[1]+p_half,
idx_from[2]+p_half,
params->npdims[1], params->npdims[2]);
}
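/* Note (ours), a sketch of the algebra behind the expansion above. With
   t0 = x - h*idx and grid offsets k = p_from, ..., p_from+P-1, the Gaussian
   weight at grid point k factors separably:

     q * exp(-c*(t0 - k*h)^2)
       = q*exp(-c*t0^2) * exp(2*c*h*t0)^k * exp(-c*(k*h)^2)
       =       z3       *     z_base^k    *      zs[k],

   which is why z2_* is built by repeated multiplication with z_base and the
   position-independent zs factor can be precomputed once. The force weights
   zf_* carry the derivative factor proportional to -(t0 - k*h); whether the
   Gaussian's extra factor of 2c sits here or is absorbed by the caller
   differs between this variant and the 1p variant below. */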
#elif defined(ONE_PERIODIC)
// -----------------------------------------------------------------------------
static
int fgg_expansion_1p_force(const double x[3], const double q,
const SE_FGG_params* params,
double z2_0[P_MAX],
double z2_1[P_MAX],
double z2_2[P_MAX],
double zf_0[P_MAX],
double zf_1[P_MAX],
double zf_2[P_MAX])
{
const int p = params->P;
const int p_half = params->P_half;
const double h = params->h;
const double c=params->c;
const double a=params->a;
const double b=params->b;
double t0[3];
int idx;
int idx_from[3], p_from;
// compute index range and centering
if(is_odd(p))
{
idx = (int) round(x[0]/h);
idx_from[0] = idx - p_half;
t0[0] = x[0]-h*idx;
idx = (int) round((x[1]-(a+h/2))/h);
idx_from[1] = idx - p_half;
t0[1] = x[1] - (idx*h + (a+h/2));
idx = (int) round((x[2]-(b+h/2))/h);
idx_from[2] = idx - p_half;
t0[2] = x[2] - (idx*h + (b+h/2));
p_from = -p_half;
}
else
{
idx = (int) floor(x[0]/h);
idx_from[0] = idx - (p_half-1);
t0[0] = x[0]-h*idx;
idx = (int) floor((x[1]-(a+h/2))/h);
idx_from[1] = idx - (p_half-1);
t0[1] = x[1] - (idx*h + (a+h/2));
idx = (int) floor((x[2]-(b+h/2))/h);
idx_from[2] = idx - (p_half-1);
t0[2] = x[2] - (idx*h + (b+h/2));
p_from = -p_half + 1;
}
// compute third factor
double z3 = exp(-c*(t0[0]*t0[0] + t0[1]*t0[1] + t0[2]*t0[2]) )*q;
// compute second factor by induction
double z_base0 = exp(2*c*h*t0[0]);
double z_base1 = exp(2*c*h*t0[1]);
double z_base2 = exp(2*c*h*t0[2]);
double z0, z1, z2;
if(is_odd(p))
{
z0 = pow(z_base0,-p_half);
z1 = pow(z_base1,-p_half);
z2 = pow(z_base2,-p_half);
}
else
{
z0 = pow(z_base0,-p_half+1);
z1 = pow(z_base1,-p_half+1);
z2 = pow(z_base2,-p_half+1);
}
z2_0[0] = z0;
z2_1[0] = z1;
z2_2[0] = z2;
// extra terms multiplied to calculate forces
zf_0[0] = -2*c*(t0[0]-p_from*h);
zf_1[0] = -2*c*(t0[1]-p_from*h);
zf_2[0] = -2*c*(t0[2]-p_from*h);
for(int i=1; i<p; i++)
{
z0 *=z_base0;
z1 *=z_base1;
z2 *=z_base2;
z2_0[i] = z0;
z2_1[i] = z1;
z2_2[i] = z2;
zf_0[i] = -2*c*(t0[0]-(p_from+i)*h);
zf_1[i] = -2*c*(t0[1]-(p_from+i)*h);
zf_2[i] = -2*c*(t0[2]-(p_from+i)*h);
}
// save some flops by multiplying one vector with z3 factor
for(int i=0; i<p; i++)
{
z2_0[i] *= z3;
}
return __IDX3_RMAJ(idx_from[0]+p_half,
idx_from[1],
idx_from[2],
params->npdims[1], params->npdims[2]);
}
#endif // end periodicity
// -----------------------------------------------------------------------------
void SE_FGG_expand_all_force(SE_FGG_work* work,
const SE_state* st,
const SE_FGG_params* params)
{
double xn[3] MEM_ALIGNED;
const int N = params->N;
const int P = params->P;
for(int n=0; n<N; n++)
{
// compute index and expansion vectors
xn[0] = st->x[n]; xn[1] = st->x[n+N]; xn[2] = st->x[n+2*N];
*(work->idx+n) = __FGG_EXPA_FORCE(xn,1,params,
work->zx+n*P,
work->zy+n*P,
work->zz+n*P,
work->zfx+n*P,
work->zfy+n*P,
work->zfz+n*P);
}
}
// -----------------------------------------------------------------------------
// vanilla grid gather to calculate forces
void SE_FGG_int_force(double* restrict force,
const SE_FGG_work* work,
SE_state* st,
const SE_FGG_params* params)
{
double z2_0[P_MAX] MEM_ALIGNED;
double z2_1[P_MAX] MEM_ALIGNED;
double z2_2[P_MAX] MEM_ALIGNED;
    // to calculate forces
double zf_0[P_MAX] MEM_ALIGNED;
double zf_1[P_MAX] MEM_ALIGNED;
double zf_2[P_MAX] MEM_ALIGNED;
// unpack params
const double* restrict H = work->H;
const double* restrict zs = work->zs;
const int p = params->P;
const int N = params->N;
const double h=params->h;
const double h3 = h*h*h;
double xm[3];
int i,j,k,idx, zidx;
double force_m[3], cij,Hzc;
#ifdef CALC_ENERGY
double phi_m;
#endif
const int incrj = params->npdims[2]-p;
const int incri = params->npdims[2]*(params->npdims[1]-p);
#ifdef _OPENMP
#pragma omp for // work-share over OpenMP threads here
#endif
for(int m=0; m<N; m++)
{
xm[0] = st->x[m]; xm[1] = st->x[m+N]; xm[2] = st->x[m+2*N];
idx = __FGG_EXPA_FORCE(xm, 1, params, z2_0, z2_1, z2_2,zf_0,zf_1,zf_2);
force_m[0] = 0; force_m[1] = 0; force_m[2] = 0;
#ifdef CALC_ENERGY
phi_m = 0;
#endif
zidx = 0;
for(i = 0; i<p; i++)
{
for(j = 0; j<p; j++)
{
cij = z2_0[i]*z2_1[j];
for(k = 0; k<p; k++)
{
Hzc = H[idx]*zs[zidx]*z2_2[k]*cij;
#ifdef CALC_ENERGY
phi_m += Hzc;
#endif
force_m[0] += Hzc*zf_0[i];
force_m[1] += Hzc*zf_1[j];
force_m[2] += Hzc*zf_2[k];
idx++; zidx++;
}
idx += incrj;
}
idx += incri;
}
force[m ] = h3*force_m[0];
force[m+ N] = h3*force_m[1];
force[m+2*N] = h3*force_m[2];
#ifdef CALC_ENERGY
st->phi[m] = h3*phi_m;
#endif
}
}
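/* Note (ours): per particle m the gather above evaluates the separable
   tensor-product sum

     F_d(m) = h^3 * sum_{i,j,k} H[i,j,k]*zs[i,j,k] * z2_0[i]*z2_1[j]*z2_2[k] * zf_d,

   with zf_d = zf_0[i], zf_1[j] or zf_2[k] for d = x, y, z (and, under
   CALC_ENERGY, the same sum without the zf factor gives phi). Hoisting
   cij = z2_0[i]*z2_1[j] out of the k-loop leaves a pure multiply-accumulate
   inner loop, which is what the split SSE/AVX kernels below vectorize. */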
// -----------------------------------------------------------------------------
void SE_FGG_int_split_SSE_dispatch_force(double* restrict force,
SE_state *st,
const SE_FGG_work* work,
const SE_FGG_params* params)
{
const int p = params->P;
const int incrj = params->dims[2]; // middle increment
const int incri = params->npdims[2]*(params->dims[1]);// outer increment
#if 0
// THIS BYPASSES THE FAST SSE KERNELS.
__DISPATCHER_MSG("[FGG INT SSE] SSE Disabled\n");
SE_FGG_int_split_force(force, st, work, params);
return;
#endif
// if P is odd, or if either increment is odd, fall back on vanilla
if( is_odd(p) || is_odd(incri) || is_odd(incrj) )
{
__DISPATCHER_MSG("[FGG INT SSE] SSE Abort (PARAMS)\n");
SE_FGG_int_split_force(force, st, work, params);
return;
}
// otherwise the preconditions for SSE codes are satisfied.
if(p==8)
{
// specific for p=8
__DISPATCHER_MSG("[FGG INT SSE] P=8\n");
SE_FGG_int_split_SSE_P8_force(force, st, work, params);
}
else if(p==16)
{
// specific for p=16
__DISPATCHER_MSG("[FGG INT SSE] P=16\n");
SE_FGG_int_split_SSE_P16_force(force, st, work, params);
}
else if(p%8==0)
{
// for p divisible by 8
__DISPATCHER_MSG("[FGG INT SSE] P unroll 8\n");
SE_FGG_int_split_SSE_u8_force(force, st, work, params);
}
else
{
// vanilla SSE code (any even p)
__DISPATCHER_MSG("[FGG INT SSE] Vanilla\n");
SE_FGG_int_split_SSE_force(force, st, work, params);
}
}
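/* Note (ours): the evenness checks above matter because the kernels test
   idx%2 only once per particle. With P and the increments even, every index
   step in the gather is even, so the parity of idx (and hence whether
   _mm_load_pd or _mm_loadu_pd is the right load for H) stays fixed over the
   particle's whole stencil. */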
// -----------------------------------------------------------------------------
void SE_FGG_int_split_force(double* restrict force,
SE_state* st,
const SE_FGG_work* work,
const SE_FGG_params* params)
{
// unpack params
const double* restrict H = work->H;
const double* restrict zs = work->zs;
const double* restrict zx = work->zx;
const double* restrict zy = work->zy;
const double* restrict zz = work->zz;
const double* restrict zfx = work->zfx;
const double* restrict zfy = work->zfy;
const double* restrict zfz = work->zfz;
const int p = params->P;
const int N = params->N;
const double h = params->h;
const double h3= h*h*h;
int i,j,k,m,idx,idx_zs,idx_zz;
double force_m[3], cij, Hzc;
#ifdef CALC_ENERGY
double phi_m;
#endif
const int incrj = params->npdims[2]-p;
const int incri = params->npdims[2]*(params->npdims[1]-p);
#ifdef _OPENMP
#pragma omp for private(m)// work-share over OpenMP threads here
#endif
for(m=0; m<N; m++)
{
idx = work->idx[m];
force_m[0] = 0; force_m[1] = 0; force_m[2] = 0;
#ifdef CALC_ENERGY
phi_m = 0;
#endif
idx_zs = 0;
for(i = 0; i<p; i++)
{
for(j = 0; j<p; j++)
{
cij = zx[m*p+i]*zy[m*p+j];
idx_zz=m*p;
for(k = 0; k<p; k++)
{
Hzc = H[idx]*zs[idx_zs]*zz[idx_zz]*cij;
#ifdef CALC_ENERGY
phi_m += Hzc;
#endif
force_m[0] += Hzc*zfx[m*p+i];
force_m[1] += Hzc*zfy[m*p+j];
force_m[2] += Hzc*zfz[m*p+k];
idx++; idx_zs++; idx_zz++;
}
idx += incrj;
}
idx += incri;
}
force[m ] = h3*force_m[0];
force[m+ N] = h3*force_m[1];
force[m+2*N] = h3*force_m[2];
#ifdef CALC_ENERGY
st->phi[m] = h3*phi_m;
#endif
}
}
// -----------------------------------------------------------------------------
void SE_FGG_int_split_SSE_force(double* restrict force,
SE_state* st,
const SE_FGG_work* work,
const SE_FGG_params* params)
{
// unpack params
const double* restrict H = work->H;
const double* restrict zs = work->zs;
const double* restrict zx = work->zx;
const double* restrict zy = work->zy;
const double* restrict zz = work->zz;
const double* restrict zfx = work->zfx;
const double* restrict zfy = work->zfy;
const double* restrict zfz = work->zfz;
const int p = params->P;
const int N = params->N;
const double h=params->h;
const double h3 = h*h*h;
int i,j,k,m,idx,idx_zs,idx_zz;
double sx[2] MEM_ALIGNED;
double sy[2] MEM_ALIGNED;
double sz[2] MEM_ALIGNED;
__m128d rH0, rZZ0, rZS0,rZFZ0;
__m128d rC, rCX, rCY;
__m128d rFX, rFY, rFZ;
#ifdef CALC_ENERGY
double s[2] MEM_ALIGNED;
__m128d rP;
#endif
const int incrj = params->npdims[2]-p;
const int incri = params->npdims[2]*(params->npdims[1]-p);
for(m=0; m<N; m++)
{
idx = work->idx[m];
idx_zs = 0;
rFX = _mm_setzero_pd();
rFY = _mm_setzero_pd();
rFZ = _mm_setzero_pd();
#ifdef CALC_ENERGY
rP = _mm_setzero_pd();
#endif
if(idx%2==0) // H[idx] is 16-aligned so vectorization simple
{
for(i = 0; i<p; i++)
{
for(j = 0; j<p; j++)
{
double tmp = zx[m*p+i]*zy[m*p+j];
rC = _mm_set1_pd( tmp );
rCX = _mm_set1_pd( tmp * zfx[m*p+i]);
rCY = _mm_set1_pd( tmp * zfy[m*p+j]);
idx_zz=m*p;
for(k = 0; k<p; k+=2)
{
rZFZ0= _mm_load_pd( zfz+ m*p+k );
rH0 = _mm_load_pd( H + idx );
rZZ0 = _mm_load_pd( zz + idx_zz);
rZS0 = _mm_load_pd( zs + idx_zs);
rFX = _mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCX),rZS0)));
rFY = _mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCY),rZS0)));
rFZ = _mm_add_pd(rFZ,_mm_mul_pd(_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rC),rZS0)),rZFZ0));
#ifdef CALC_ENERGY
rP = _mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rC),rZS0)));
#endif
idx+=2;
idx_zs+=2;
idx_zz+=2;
}
idx += incrj;
}
idx += incri;
}
}
else // H[idx] not 16-aligned, so use non-aligned loads
{
for(i = 0; i<p; i++)
{
for(j = 0; j<p; j++)
{
double tmp = zx[m*p+i]*zy[m*p+j];
rC = _mm_set1_pd( tmp );
rCX = _mm_set1_pd( tmp * zfx[m*p+i]);
rCY = _mm_set1_pd( tmp * zfy[m*p+j]);
idx_zz=m*p;
for(k = 0; k<p; k+=2)
{
rZFZ0= _mm_load_pd( zfz + m*p+k );
rH0 = _mm_loadu_pd( H+idx );
rZZ0 = _mm_load_pd( zz + idx_zz);
rZS0 = _mm_load_pd( zs + idx_zs);
rFX = _mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCX),rZS0)));
rFY = _mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCY),rZS0)));
rFZ = _mm_add_pd(rFZ,_mm_mul_pd(_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rC),rZS0)),rZFZ0));
#ifdef CALC_ENERGY
rP = _mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rC),rZS0)));
#endif
idx+=2;
idx_zs+=2;
idx_zz+=2;
}
idx += incrj;
}
idx += incri;
}
}
_mm_store_pd(sx,rFX);
_mm_store_pd(sy,rFY);
_mm_store_pd(sz,rFZ);
force[m ] = h3*(sx[0]+sx[1]);
force[m+ N] = h3*(sy[0]+sy[1]);
force[m+2*N] = h3*(sz[0]+sz[1]);
#ifdef CALC_ENERGY
_mm_store_pd(s,rP);
st->phi[m] = h3*(s[0]+s[1]);
#endif
}
}
// -----------------------------------------------------------------------------
void SE_FGG_int_split_SSE_P8_force(double* restrict force,
SE_state* st,
const SE_FGG_work* work,
const SE_FGG_params* params)
{
// unpack params
const double* restrict H = work->H;
const double* restrict zs = work->zs;
const double* restrict zx = work->zx;
const double* restrict zy = work->zy;
const double* restrict zz = work->zz;
const double* restrict zfx = work->zfx;
const double* restrict zfy = work->zfy;
const double* restrict zfz = work->zfz;
/* ASSUME P=8 const int p = params->P; */
const int N = params->N;
const double h=params->h;
const double h3 = h*h*h;
int i,j,idx,idx_zs;
double sx[2] MEM_ALIGNED;
double sy[2] MEM_ALIGNED;
double sz[2] MEM_ALIGNED;
// hold entire zz vector
__m128d rZZ0, rZZ1, rZZ2, rZZ3;
__m128d rC, rCX, rCY;
__m128d rH0, rH1, rH2, rH3;
__m128d rZS0, rZS1, rZS2, rZS3;
__m128d rZFZ0, rZFZ1, rZFZ2, rZFZ3;
__m128d rFX, rFY, rFZ;
#ifdef CALC_ENERGY
double s[2] MEM_ALIGNED;
__m128d rP;
#endif
const int incrj = params->npdims[2]-8;
const int incri = params->npdims[2]*(params->npdims[1]-8);
for(int m=0; m<N; m++)
{
idx = work->idx[m];
idx_zs = 0;
rFX = _mm_setzero_pd();
rFY = _mm_setzero_pd();
rFZ = _mm_setzero_pd();
#ifdef CALC_ENERGY
rP = _mm_setzero_pd();
#endif
/* hoist load of ZZ vector */
rZZ0 = _mm_load_pd(zz + m*8 );
rZZ1 = _mm_load_pd(zz + m*8 + 2 );
rZZ2 = _mm_load_pd(zz + m*8 + 4 );
rZZ3 = _mm_load_pd(zz + m*8 + 6 );
/* hoist load of ZFZ vector */
rZFZ0 = _mm_load_pd(zfz + m*8 );
rZFZ1 = _mm_load_pd(zfz + m*8 + 2 );
rZFZ2 = _mm_load_pd(zfz + m*8 + 4 );
rZFZ3 = _mm_load_pd(zfz + m*8 + 6 );
if(idx%2==0) // H[idx] is 16-aligned so vectorization simple
{
for(i = 0; i<8; i++)
{
for(j = 0; j<8; j++)
{
double tmp = zx[m*8+i]*zy[m*8 + j];
rC = _mm_set1_pd( tmp );
rCX = _mm_set1_pd( tmp * zfx[m*8 + i]);
rCY = _mm_set1_pd( tmp * zfy[m*8 + j]);
rH0 = _mm_load_pd( H+idx );
rH1 = _mm_load_pd( H+idx + 2);
rH2 = _mm_load_pd( H+idx + 4);
rH3 = _mm_load_pd( H+idx + 6);
rZS0 = _mm_load_pd( zs + idx_zs );
rZS1 = _mm_load_pd( zs + idx_zs + 2);
rZS2 = _mm_load_pd( zs + idx_zs + 4);
rZS3 = _mm_load_pd( zs + idx_zs + 6);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCX),rZS0)));
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rCX),rZS1)));
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rCX),rZS2)));
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rCX),rZS3)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCY),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rCY),rZS1)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rCY),rZS2)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rCY),rZS3)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ1,rZZ1),rC),rZS1)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ2,rZZ2),rC),rZS2)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ3,rZZ3),rC),rZS3)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rC),rZS0)));
rP =_mm_add_pd(rP,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rC),rZS1)));
rP =_mm_add_pd(rP,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rC),rZS2)));
rP =_mm_add_pd(rP,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rC),rZS3)));
#endif
idx_zs +=8;
idx += incrj + 8;
}
idx += incri;
}
}
else // H[idx] not 16-aligned, so use non-aligned loads
{
for(i = 0; i<8; i++)
{
for(j = 0; j<8; j++)
{
double tmp = zx[m*8+i]*zy[m*8 + j];
rC = _mm_set1_pd( tmp );
rCX = _mm_set1_pd( tmp * zfx[m*8 + i]);
rCY = _mm_set1_pd( tmp * zfy[m*8 + j]);
rH0 = _mm_loadu_pd( H+idx );
rH1 = _mm_loadu_pd( H+idx + 2);
rH2 = _mm_loadu_pd( H+idx + 4);
rH3 = _mm_loadu_pd( H+idx + 6);
rZS0 = _mm_load_pd( zs + idx_zs );
rZS1 = _mm_load_pd( zs + idx_zs + 2);
rZS2 = _mm_load_pd( zs + idx_zs + 4);
rZS3 = _mm_load_pd( zs + idx_zs + 6);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCX),rZS0)));
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rCX),rZS1)));
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rCX),rZS2)));
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rCX),rZS3)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCY),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rCY),rZS1)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rCY),rZS2)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rCY),rZS3)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ1,rZZ1),rC),rZS1)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ2,rZZ2),rC),rZS2)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ3,rZZ3),rC),rZS3)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rC),rZS0)));
rP =_mm_add_pd(rP,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rC),rZS1)));
rP =_mm_add_pd(rP,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rC),rZS2)));
rP =_mm_add_pd(rP,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rC),rZS3)));
#endif
idx_zs +=8;
idx += incrj + 8;
}
idx += incri;
}
}
_mm_store_pd(sx,rFX);
_mm_store_pd(sy,rFY);
_mm_store_pd(sz,rFZ);
force[m ] = h3*(sx[0]+sx[1]);
force[m+ N] = h3*(sy[0]+sy[1]);
force[m+2*N] = h3*(sz[0]+sz[1]);
#ifdef CALC_ENERGY
_mm_store_pd(s,rP);
st->phi[m] = h3*(s[0]+s[1]);
#endif
}
}
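/* Note (ours): with P fixed to 8 the whole z-line (8 doubles) fits in four
   xmm registers, so zz and zfz are loaded once per particle ("hoisted")
   instead of once per (i,j) column; only H and zs still stream through the
   inner loop. The P=16 kernel below does the same with eight registers per
   vector, at the cost of heavier register pressure. */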
// -----------------------------------------------------------------------------
void SE_FGG_int_split_SSE_P16_force(double* restrict force,
SE_state* st,
const SE_FGG_work* work,
const SE_FGG_params* params)
{
// unpack params
const double* restrict H = work->H;
const double* restrict zs = work->zs;
const double* restrict zx = work->zx;
const double* restrict zy = work->zy;
const double* restrict zz = work->zz;
const double* restrict zfx = work->zfx;
const double* restrict zfy = work->zfy;
const double* restrict zfz = work->zfz;
/* ASSUME P=16 const int p = params->P; */
const int N = params->N;
const double h=params->h;
const double h3 = h*h*h;
int i,j,idx,idx_zs;
double sx[2] MEM_ALIGNED;
double sy[2] MEM_ALIGNED;
double sz[2] MEM_ALIGNED;
// hold entire zz vector
__m128d rZZ0 , rZZ1 , rZZ2 , rZZ3 , rZZ4 , rZZ5 , rZZ6 , rZZ7;
__m128d rZFZ0, rZFZ1, rZFZ2, rZFZ3, rZFZ4, rZFZ5, rZFZ6, rZFZ7;
__m128d rC, rCX, rCY, rFX, rFY, rFZ;
__m128d rH0, rZS0;
#ifdef CALC_ENERGY
double s[2] MEM_ALIGNED;
__m128d rP;
#endif
const int incrj = params->npdims[2]-16;
const int incri = params->npdims[2]*(params->npdims[1]-16);
for(int m=0; m<N; m++)
{
idx = work->idx[m];
_mm_prefetch( (void*) (H+idx), _MM_HINT_T0);
idx_zs = 0;
_mm_prefetch( (void*) zs, _MM_HINT_T0);
rFX = _mm_setzero_pd();
rFY = _mm_setzero_pd();
rFZ = _mm_setzero_pd();
#ifdef CALC_ENERGY
rP = _mm_setzero_pd();
#endif
/* hoist load of ZZ vector */
rZZ0 = _mm_load_pd(zz + m*16 );
rZZ1 = _mm_load_pd(zz + m*16 + 2 );
rZZ2 = _mm_load_pd(zz + m*16 + 4 );
rZZ3 = _mm_load_pd(zz + m*16 + 6 );
rZZ4 = _mm_load_pd(zz + m*16 + 8 );
rZZ5 = _mm_load_pd(zz + m*16 + 10);
rZZ6 = _mm_load_pd(zz + m*16 + 12);
rZZ7 = _mm_load_pd(zz + m*16 + 14);
/* hoist load of ZFZ vector */
rZFZ0 = _mm_load_pd(zfz + m*16 );
rZFZ1 = _mm_load_pd(zfz + m*16 + 2 );
rZFZ2 = _mm_load_pd(zfz + m*16 + 4 );
rZFZ3 = _mm_load_pd(zfz + m*16 + 6 );
rZFZ4 = _mm_load_pd(zfz + m*16 + 8 );
rZFZ5 = _mm_load_pd(zfz + m*16 + 10);
rZFZ6 = _mm_load_pd(zfz + m*16 + 12);
rZFZ7 = _mm_load_pd(zfz + m*16 + 14);
if(idx%2==0) // H[idx] is 16-aligned so vectorization simple
{
for(i = 0; i<16; i++)
{
for(j = 0; j<16; j++)
{
double tmp = zx[m*16+i]*zy[m*16+j];
rC = _mm_set1_pd( tmp );
rCX = _mm_set1_pd( tmp * zfx[m*16+i] );
rCY = _mm_set1_pd( tmp * zfy[m*16+j]);
/* 0 */
rH0 = _mm_load_pd( H+idx );
rZS0 = _mm_load_pd( zs + idx_zs);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
#ifdef CALC_ENERGY
		    rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rC),rZS0)));
#endif
/* 1 */
rH0 = _mm_load_pd( H+idx + 2);
rZS0 = _mm_load_pd( zs + idx_zs + 2);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ1,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ1,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ1,rZZ1),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ1,rC),rZS0)));
#endif
/* 2 */
rH0 = _mm_load_pd( H+idx + 4);
rZS0 = _mm_load_pd( zs + idx_zs + 4);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ2,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ2,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ2,rZZ2),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ2,rC),rZS0)));
#endif
/* 3 */
rH0 = _mm_load_pd( H+idx + 6);
rZS0 = _mm_load_pd( zs + idx_zs + 6);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ3,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ3,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ3,rZZ3),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ3,rC),rZS0)));
#endif
/* 4 */
rH0 = _mm_load_pd( H+idx + 8);
rZS0 = _mm_load_pd( zs + idx_zs + 8);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ4,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ4,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ4,rZZ4),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ4,rC),rZS0)));
#endif
/* 5 */
rH0 = _mm_load_pd( H+idx + 10);
rZS0 = _mm_load_pd( zs + idx_zs + 10);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ5,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ5,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ5,rZZ5),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ5,rC),rZS0)));
#endif
/* 6 */
rH0 = _mm_load_pd( H+idx + 12);
rZS0 = _mm_load_pd( zs + idx_zs + 12);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ6,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ6,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ6,rZZ6),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ6,rC),rZS0)));
#endif
/* 7 */
rH0 = _mm_load_pd( H+idx + 14);
rZS0 = _mm_load_pd( zs + idx_zs + 14);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ7,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ7,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ7,rZZ7),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ7,rC),rZS0)));
#endif
idx_zs +=16;
idx += incrj + 16;
}
idx += incri;
}
}
else // H[idx] not 16-aligned, so use non-aligned loads
{
for(i = 0; i<16; i++)
{
for(j = 0; j<16; j++)
{
double tmp = zx[m*16+i]*zy[m*16+j];
rC = _mm_set1_pd( tmp );
rCX = _mm_set1_pd( tmp * zfx[m*16+i]);
rCY = _mm_set1_pd( tmp * zfy[m*16+j]);
/* 0 */
rH0 = _mm_loadu_pd( H+idx );
rZS0 = _mm_load_pd( zs + idx_zs);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rC),rZS0)));
#endif
/* 1 */
rH0 = _mm_loadu_pd( H+idx + 2);
rZS0 = _mm_load_pd( zs + idx_zs + 2);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ1,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ1,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ1,rZZ1),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ1,rC),rZS0)));
#endif
/* 2 */
rH0 = _mm_loadu_pd( H+idx + 4);
rZS0 = _mm_load_pd( zs + idx_zs + 4);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ2,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ2,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ2,rZZ2),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ2,rC),rZS0)));
#endif
/* 3 */
rH0 = _mm_loadu_pd( H+idx + 6);
rZS0 = _mm_load_pd( zs + idx_zs + 6);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ3,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ3,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ3,rZZ3),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ3,rC),rZS0)));
#endif
/* 4 */
rH0 = _mm_loadu_pd( H+idx + 8);
rZS0 = _mm_load_pd( zs + idx_zs + 8);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ4,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ4,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ4,rZZ4),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ4,rC),rZS0)));
#endif
/* 5 */
rH0 = _mm_loadu_pd( H+idx + 10);
rZS0 = _mm_load_pd( zs + idx_zs + 10);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ5,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ5,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ5,rZZ5),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ5,rC),rZS0)));
#endif
/* 6 */
rH0 = _mm_loadu_pd( H+idx + 12);
rZS0 = _mm_load_pd( zs + idx_zs + 12);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ6,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ6,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ6,rZZ6),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ6,rC),rZS0)));
#endif
/* 7 */
rH0 = _mm_loadu_pd( H+idx + 14);
rZS0 = _mm_load_pd( zs + idx_zs + 14);
rFX =_mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ7,rCX),rZS0)));
rFY =_mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ7,rCY),rZS0)));
rFZ =_mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ7,rZZ7),rC),rZS0)));
#ifdef CALC_ENERGY
rP =_mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ7,rC),rZS0)));
#endif
idx_zs +=16;
idx += incrj + 16;
}
idx += incri;
}
}
_mm_store_pd(sx,rFX);
_mm_store_pd(sy,rFY);
_mm_store_pd(sz,rFZ);
force[m ] = h3*(sx[0]+sx[1]);
force[m+ N] = h3*(sy[0]+sy[1]);
force[m+2*N] = h3*(sz[0]+sz[1]);
#ifdef CALC_ENERGY
_mm_store_pd(s,rP);
st->phi[m] = h3*(s[0]+s[1]);
#endif
}
}
// -----------------------------------------------------------------------------
void SE_FGG_int_split_SSE_u8_force(double* restrict force,
SE_state* st,
const SE_FGG_work* work,
const SE_FGG_params* params)
{
// unpack params
const double* restrict H = work->H;
const double* restrict zs = work->zs;
const double* restrict zx = work->zx;
const double* restrict zy = work->zy;
const double* restrict zz = work->zz;
const double* restrict zfx = work->zfx;
const double* restrict zfy = work->zfy;
const double* restrict zfz = work->zfz;
const int p = params->P;
const int N = params->N;
const double h=params->h;
const double h3 = h*h*h;
int i,j,k,idx,idx_zs,idx_zz;
double sx[2] MEM_ALIGNED;
double sy[2] MEM_ALIGNED;
double sz[2] MEM_ALIGNED;
__m128d rH0, rZZ0, rZS0, rZFZ0;
__m128d rH1, rZZ1, rZS1, rZFZ1;
__m128d rH2, rZZ2, rZS2, rZFZ2;
__m128d rH3, rZZ3, rZS3, rZFZ3;
__m128d rFX, rFY, rFZ;
__m128d rC, rCX, rCY;
#ifdef CALC_ENERGY
double s[2] MEM_ALIGNED;
__m128d rP;
#endif
const int incrj = params->npdims[2]-p;
const int incri = params->npdims[2]*(params->npdims[1]-p);
for(int m=0; m<N; m++)
{
idx = work->idx[m];
_mm_prefetch( (void*) (H+idx), _MM_HINT_T0);
idx_zs = 0;
_mm_prefetch( (void*) zs, _MM_HINT_T0);
rFX = _mm_setzero_pd();
rFY = _mm_setzero_pd();
rFZ = _mm_setzero_pd();
#ifdef CALC_ENERGY
rP = _mm_setzero_pd();
#endif
if(idx%2==0) // H[idx] is 16-aligned so vectorization simple
{
for(i = 0; i<p; i++)
{
for(j = 0; j<p; j++)
{
double tmp = zx[m*p+i]*zy[m*p+j];
rC = _mm_set1_pd( tmp );
rCX = _mm_set1_pd( tmp * zfx[m*p+i]);
rCY = _mm_set1_pd( tmp * zfy[m*p+j]);
idx_zz=m*p;
for(k = 0; k<p; k+=8)
{
rH0 = _mm_load_pd( H+idx );
rH1 = _mm_load_pd( H+idx + 2);
rH2 = _mm_load_pd( H+idx + 4);
rH3 = _mm_load_pd( H+idx + 6);
rZZ0 = _mm_load_pd( zz + idx_zz );
rZZ1 = _mm_load_pd( zz + idx_zz + 2);
rZZ2 = _mm_load_pd( zz + idx_zz + 4);
rZZ3 = _mm_load_pd( zz + idx_zz + 6);
rZS0 = _mm_load_pd( zs + idx_zs );
rZS1 = _mm_load_pd( zs + idx_zs + 2);
rZS2 = _mm_load_pd( zs + idx_zs + 4);
rZS3 = _mm_load_pd( zs + idx_zs + 6);
rZFZ0 = _mm_load_pd(zfz+ idx_zz );
rZFZ1 = _mm_load_pd(zfz+ idx_zz + 2);
rZFZ2 = _mm_load_pd(zfz+ idx_zz + 4);
rZFZ3 = _mm_load_pd(zfz+ idx_zz + 6);
rFX = _mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCX),rZS0)));
rFX = _mm_add_pd(rFX,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rCX),rZS1)));
rFX = _mm_add_pd(rFX,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rCX),rZS2)));
rFX = _mm_add_pd(rFX,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rCX),rZS3)));
rFY = _mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCY),rZS0)));
rFY = _mm_add_pd(rFY,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rCY),rZS1)));
rFY = _mm_add_pd(rFY,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rCY),rZS2)));
rFY = _mm_add_pd(rFY,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rCY),rZS3)));
rFZ = _mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
rFZ = _mm_add_pd(rFZ,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ1,rZZ1),rC),rZS1)));
rFZ = _mm_add_pd(rFZ,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ2,rZZ2),rC),rZS2)));
rFZ = _mm_add_pd(rFZ,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ3,rZZ3),rC),rZS3)));
#ifdef CALC_ENERGY
rP = _mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rC),rZS0)));
rP = _mm_add_pd(rP,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rC),rZS1)));
rP = _mm_add_pd(rP,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rC),rZS2)));
rP = _mm_add_pd(rP,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rC),rZS3)));
#endif
idx+=8;
idx_zs+=8;
idx_zz+=8;
}
idx += incrj;
}
idx += incri;
}
}
else // H[idx] not 16-aligned, so use non-aligned load from H
{
for(i = 0; i<p; i++)
{
for(j = 0; j<p; j++)
{
double tmp = zx[m*p+i]*zy[m*p+j];
rC = _mm_set1_pd( tmp );
rCX = _mm_set1_pd( tmp * zfx[m*p+i]);
rCY = _mm_set1_pd( tmp * zfy[m*p+j]);
idx_zz=m*p;
for(k = 0; k<p; k+=8)
{
rH0 = _mm_loadu_pd( H+idx );
rH1 = _mm_loadu_pd( H+idx + 2);
rH2 = _mm_loadu_pd( H+idx + 4);
rH3 = _mm_loadu_pd( H+idx + 6);
rZZ0 = _mm_load_pd( zz + idx_zz );
rZZ1 = _mm_load_pd( zz + idx_zz + 2);
rZZ2 = _mm_load_pd( zz + idx_zz + 4);
rZZ3 = _mm_load_pd( zz + idx_zz + 6);
rZS0 = _mm_load_pd( zs + idx_zs );
rZS1 = _mm_load_pd( zs + idx_zs + 2);
rZS2 = _mm_load_pd( zs + idx_zs + 4);
rZS3 = _mm_load_pd( zs + idx_zs + 6);
rZFZ0 = _mm_load_pd(zfz+ idx_zz );
rZFZ1 = _mm_load_pd(zfz+ idx_zz + 2);
rZFZ2 = _mm_load_pd(zfz+ idx_zz + 4);
rZFZ3 = _mm_load_pd(zfz+ idx_zz + 6);
rFX = _mm_add_pd(rFX,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCX),rZS0)));
rFX = _mm_add_pd(rFX,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rCX),rZS1)));
rFX = _mm_add_pd(rFX,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rCX),rZS2)));
rFX = _mm_add_pd(rFX,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rCX),rZS3)));
rFY = _mm_add_pd(rFY,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rCY),rZS0)));
rFY = _mm_add_pd(rFY,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rCY),rZS1)));
rFY = _mm_add_pd(rFY,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rCY),rZS2)));
rFY = _mm_add_pd(rFY,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rCY),rZS3)));
rFZ = _mm_add_pd(rFZ,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
rFZ = _mm_add_pd(rFZ,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ1,rZZ1),rC),rZS1)));
rFZ = _mm_add_pd(rFZ,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ2,rZZ2),rC),rZS2)));
rFZ = _mm_add_pd(rFZ,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(_mm_mul_pd(rZFZ3,rZZ3),rC),rZS3)));
#ifdef CALC_ENERGY
rP = _mm_add_pd(rP,_mm_mul_pd(rH0,_mm_mul_pd(_mm_mul_pd(rZZ0,rC),rZS0)));
rP = _mm_add_pd(rP,_mm_mul_pd(rH1,_mm_mul_pd(_mm_mul_pd(rZZ1,rC),rZS1)));
rP = _mm_add_pd(rP,_mm_mul_pd(rH2,_mm_mul_pd(_mm_mul_pd(rZZ2,rC),rZS2)));
rP = _mm_add_pd(rP,_mm_mul_pd(rH3,_mm_mul_pd(_mm_mul_pd(rZZ3,rC),rZS3)));
#endif
idx+=8;
idx_zs+=8;
idx_zz+=8;
}
idx += incrj;
}
idx += incri;
}
}
// done accumulating
_mm_store_pd(sx,rFX);
_mm_store_pd(sy,rFY);
_mm_store_pd(sz,rFZ);
force[m ] = h3*(sx[0]+sx[1]);
force[m+ N] = h3*(sy[0]+sy[1]);
force[m+2*N] = h3*(sz[0]+sz[1]);
#ifdef CALC_ENERGY
_mm_store_pd(s,rP);
st->phi[m] = h3*(s[0]+s[1]);
#endif
}
}
// -----------------------------------------------------------------------------
#ifdef __AVX__
void SE_FGG_int_split_AVX_dispatch_force(double* restrict force,
SE_state *st,
const SE_FGG_work* work,
const SE_FGG_params* params)
{
const int p = params->P;
const int incrj = params->dims[2]; // middle increment
const int incri = params->npdims[2]*(params->dims[1]);// outer increment
#ifdef AVX_FMA
__DISPATCHER_MSG("[FGG INT AVX-FMA] ");
#else
__DISPATCHER_MSG("[FGG INT AVX] ");
#endif
#if 0
// THIS BYPASSES THE FAST AVX KERNELS.
__DISPATCHER_MSG("AVX Disabled\n");
SE_FGG_int_split_force(force, st, work, params);
return;
#endif
    // if P or either increment is not divisible by 4, fall back on vanilla
if( isnot_div_by_4(p) || isnot_div_by_4(incri) || isnot_div_by_4(incrj) )
{
__DISPATCHER_MSG("AVX Abort (PARAMS)\n");
SE_FGG_int_split_force(force, st, work, params);
return;
}
// otherwise the preconditions for AVX codes are satisfied.
if(p==8)
{
// specific for p=8
__DISPATCHER_MSG("P=8\n");
SE_FGG_int_split_AVX_P8_force(force, st, work, params);
}
else if(p==16)
{
// specific for p=16
__DISPATCHER_MSG("P=16\n");
SE_FGG_int_split_AVX_P16_force(force, st, work, params);
}
else if(p%8==0)
{
// for p divisible by 8
__DISPATCHER_MSG("P unroll 8\n");
SE_FGG_int_split_AVX_u8_force(force, st, work, params);
}
else if(p%4==0)
{
// vanilla AVX code (p divisible by 4)
__DISPATCHER_MSG("P unroll 4\n");
SE_FGG_int_split_AVX_force(force, st, work, params);
}
else
{
// vanilla SSE code (any even p)
__DISPATCHER_MSG("Vanilla\n");
SE_FGG_int_split_SSE_force(force, st, work, params);
}
}
// -----------------------------------------------------------------------------
void SE_FGG_int_split_AVX_force(double* restrict force,
SE_state* st,
const SE_FGG_work* work,
const SE_FGG_params* params)
{
// unpack params
const double* restrict H = work->H;
const double* restrict zs = work->zs;
const double* restrict zx = work->zx;
const double* restrict zy = work->zy;
const double* restrict zz = work->zz;
const double* restrict zfx = work->zfx;
const double* restrict zfy = work->zfy;
const double* restrict zfz = work->zfz;
const int p = params->P;
const int N = params->N;
const double h=params->h;
const double h3 = h*h*h;
int i,j,k,m,idx,idx_zs,idx_zz;
double sx[4] MEM_ALIGNED;
double sy[4] MEM_ALIGNED;
double sz[4] MEM_ALIGNED;
__m256d rH0, rZZ0, rZS0,rZFZ0;
__m256d rC, rCX, rCY;
__m256d rFX, rFY, rFZ;
#ifdef CALC_ENERGY
double s[4] MEM_ALIGNED;
__m256d rP;
#endif
const int incrj = params->npdims[2]-p;
const int incri = params->npdims[2]*(params->npdims[1]-p);
for(m=0; m<N; m++)
{
idx = work->idx[m];
idx_zs = 0;
rFX = _mm256_setzero_pd();
rFY = _mm256_setzero_pd();
rFZ = _mm256_setzero_pd();
#ifdef CALC_ENERGY
rP = _mm256_setzero_pd();
#endif
if(idx%4==0) // H[idx] is 32-aligned so vectorization simple
{
for(i = 0; i<p; i++)
{
for(j = 0; j<p; j++)
{
double tmp = zx[m*p+i]*zy[m*p+j];
rC = _mm256_set1_pd( tmp );
rCX = _mm256_set1_pd( tmp * zfx[m*p+i]);
rCY = _mm256_set1_pd( tmp * zfy[m*p+j]);
idx_zz=m*p;
for(k = 0; k<p; k+=4)
{
rZFZ0= _mm256_load_pd( zfz+ m*p+k );
rH0 = _mm256_load_pd( H + idx );
rZZ0 = _mm256_load_pd( zz + idx_zz);
rZS0 = _mm256_load_pd( zs + idx_zs);
rH0 = _mm256_mul_pd(rH0,_mm256_mul_pd(rZZ0,rZS0));
#ifdef AVX_FMA
rFX = _mm256_fmadd_pd(rH0,rCX,rFX);
rFY = _mm256_fmadd_pd(rH0,rCY,rFY);
rFZ = _mm256_fmadd_pd(rH0,_mm256_mul_pd(rC,rZFZ0),rFZ);
#else
rFX = _mm256_add_pd(rFX,_mm256_mul_pd(rH0,rCX));
rFY = _mm256_add_pd(rFY,_mm256_mul_pd(rH0,rCY));
rFZ = _mm256_add_pd(rFZ,_mm256_mul_pd(rH0,_mm256_mul_pd(rC,rZFZ0)));
#endif
/* rFX = _mm256_add_pd(rFX,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCX),rZS0))); */
/* rFY = _mm256_add_pd(rFY,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCY),rZS0))); */
/* rFZ = _mm256_add_pd(rFZ,_mm256_mul_pd(_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rC),rZS0)),rZFZ0)); */
#ifdef CALC_ENERGY
// rP = _mm256_add_pd(rP,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rC),rZS0)));
rP = _mm256_add_pd(rP,_mm256_mul_pd(rH0,rC));
#endif
idx+=4;
idx_zs+=4;
idx_zz+=4;
}
idx += incrj;
}
idx += incri;
}
}
	else // H[idx] not 32-aligned, so use non-aligned loads
{
for(i = 0; i<p; i++)
{
for(j = 0; j<p; j++)
{
double tmp = zx[m*p+i]*zy[m*p+j];
rC = _mm256_set1_pd( tmp );
rCX = _mm256_set1_pd( tmp * zfx[m*p+i]);
rCY = _mm256_set1_pd( tmp * zfy[m*p+j]);
idx_zz=m*p;
for(k = 0; k<p; k+=4)
{
rZFZ0= _mm256_load_pd( zfz + m*p+k );
rH0 = _mm256_loadu_pd( H+idx );
rZZ0 = _mm256_load_pd( zz + idx_zz);
rZS0 = _mm256_load_pd( zs + idx_zs);
rH0 = _mm256_mul_pd(rH0,_mm256_mul_pd(rZZ0,rZS0));
#ifdef AVX_FMA
rFX = _mm256_fmadd_pd(rH0,rCX,rFX);
rFY = _mm256_fmadd_pd(rH0,rCY,rFY);
rFZ = _mm256_fmadd_pd(rH0,_mm256_mul_pd(rC,rZFZ0),rFZ);
#else
rFX = _mm256_add_pd(rFX,_mm256_mul_pd(rH0,rCX));
rFY = _mm256_add_pd(rFY,_mm256_mul_pd(rH0,rCY));
rFZ = _mm256_add_pd(rFZ,_mm256_mul_pd(rH0,_mm256_mul_pd(rC,rZFZ0)));
#endif
/* rFX = _mm256_add_pd(rFX,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCX),rZS0))); */
/* rFY = _mm256_add_pd(rFY,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCY),rZS0))); */
/* rFZ = _mm256_add_pd(rFZ,_mm256_mul_pd(_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rC),rZS0)),rZFZ0)); */
#ifdef CALC_ENERGY
// rP = _mm256_add_pd(rP,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rC),rZS0)));
rP = _mm256_add_pd(rP,_mm256_mul_pd(rH0,rC));
#endif
idx+=4;
idx_zs+=4;
idx_zz+=4;
}
idx += incrj;
}
idx += incri;
}
}
_mm256_store_pd(sx,rFX);
_mm256_store_pd(sy,rFY);
_mm256_store_pd(sz,rFZ);
force[m ] = h3*(sx[0]+sx[1]+sx[2]+sx[3]);
force[m+ N] = h3*(sy[0]+sy[1]+sy[2]+sy[3]);
force[m+2*N] = h3*(sz[0]+sz[1]+sz[2]+sz[3]);
#ifdef CALC_ENERGY
_mm256_store_pd(s,rP);
st->phi[m] = h3*(s[0]+s[1]+s[2]+s[3]);
#endif
}
}
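/* Note (ours): unlike the SSE kernels, this AVX kernel first contracts H
   with the direction-independent factor zz*zs (the rH0 overwrite above) and
   only then applies the per-direction weights. Each accumulation is then a
   single multiply-add, one fused _mm256_fmadd_pd when AVX_FMA is set,
   instead of the longer multiply chain kept in the commented-out lines. */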
// -----------------------------------------------------------------------------
void SE_FGG_int_split_AVX_P16_force(double* restrict force,
SE_state* st,
const SE_FGG_work* work,
const SE_FGG_params* params)
{
// unpack params
const double* restrict H = work->H;
const double* restrict zs = work->zs;
const double* restrict zx = work->zx;
const double* restrict zy = work->zy;
const double* restrict zz = work->zz;
const double* restrict zfx = work->zfx;
const double* restrict zfy = work->zfy;
const double* restrict zfz = work->zfz;
/* ASSUME P=16 const int p = params->P; */
const int N = params->N;
const double h=params->h;
const double h3=h*h*h;
int i,j,idx,idx_zs;
double sx[4] MEM_ALIGNED;
double sy[4] MEM_ALIGNED;
double sz[4] MEM_ALIGNED;
// hold entire zz vector
__m256d rZZ0, rZZ1, rZZ2, rZZ3;
__m256d rC, rCX, rCY;
__m256d rH0, rH1, rH2, rH3;
__m256d rZS0, rZS1, rZS2, rZS3;
__m256d rZFZ0, rZFZ1, rZFZ2, rZFZ3;
__m256d rFX, rFY, rFZ;
#ifdef CALC_ENERGY
double s[4] MEM_ALIGNED;
__m256d rP;
#endif
const int incrj = params->npdims[2]-16;
const int incri = params->npdims[2]*(params->npdims[1]-16);
for(int m=0; m<N; m++)
{
idx = work->idx[m];
_mm_prefetch( (void*) (H+idx), _MM_HINT_T0 );
idx_zs = 0;
_mm_prefetch( (void*) zs, _MM_HINT_T0 );
rFX = _mm256_setzero_pd();
rFY = _mm256_setzero_pd();
rFZ = _mm256_setzero_pd();
#ifdef CALC_ENERGY
rP = _mm256_setzero_pd();
#endif
/* hoist load of ZZ vector */
rZZ0 = _mm256_load_pd(zz + m*16 );
rZZ1 = _mm256_load_pd(zz + m*16 + 4 );
rZZ2 = _mm256_load_pd(zz + m*16 + 8 );
rZZ3 = _mm256_load_pd(zz + m*16 + 12);
/* hoist load of ZFZ vector */
rZFZ0 = _mm256_load_pd(zfz + m*16 );
rZFZ1 = _mm256_load_pd(zfz + m*16 + 4 );
rZFZ2 = _mm256_load_pd(zfz + m*16 + 8 );
rZFZ3 = _mm256_load_pd(zfz + m*16 + 12);
if(idx%4==0) // H[idx] is 32-aligned so vectorization simple
{
for(i = 0; i<16; i++)
{
for(j = 0; j<16; j++)
{
double tmp = zx[m*16+i]*zy[m*16 + j];
rC = _mm256_set1_pd( tmp );
rCX = _mm256_set1_pd( tmp * zfx[m*16 + i]);
rCY = _mm256_set1_pd( tmp * zfy[m*16 + j]);
rH0 = _mm256_load_pd( H+idx );
rH1 = _mm256_load_pd( H+idx + 4 );
rH2 = _mm256_load_pd( H+idx + 8 );
rH3 = _mm256_load_pd( H+idx + 12);
rZS0 = _mm256_load_pd( zs + idx_zs );
rZS1 = _mm256_load_pd( zs + idx_zs + 4 );
rZS2 = _mm256_load_pd( zs + idx_zs + 8 );
rZS3 = _mm256_load_pd( zs + idx_zs + 12);
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCX),rZS0)));
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCX),rZS1)));
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH2,_mm256_mul_pd(_mm256_mul_pd(rZZ2,rCX),rZS2)));
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH3,_mm256_mul_pd(_mm256_mul_pd(rZZ3,rCX),rZS3)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCY),rZS0)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCY),rZS1)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH2,_mm256_mul_pd(_mm256_mul_pd(rZZ2,rCY),rZS2)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH3,_mm256_mul_pd(_mm256_mul_pd(rZZ3,rCY),rZS3)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ1,rZZ1),rC),rZS1)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH2,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ2,rZZ2),rC),rZS2)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH3,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ3,rZZ3),rC),rZS3)));
#ifdef CALC_ENERGY
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rC),rZS0)));
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rC),rZS1)));
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH2,_mm256_mul_pd(_mm256_mul_pd(rZZ2,rC),rZS2)));
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH3,_mm256_mul_pd(_mm256_mul_pd(rZZ3,rC),rZS3)));
#endif
idx_zs +=16;
idx += incrj + 16;
}
idx += incri;
}
}
else // H[idx] not 32-aligned, so use non-aligned loads
{
for(i = 0; i<16; i++)
{
for(j = 0; j<16; j++)
{
double tmp = zx[m*16+i]*zy[m*16 + j];
rC = _mm256_set1_pd( tmp );
rCX = _mm256_set1_pd( tmp * zfx[m*16 + i]);
rCY = _mm256_set1_pd( tmp * zfy[m*16 + j]);
rH0 = _mm256_loadu_pd( H+idx );
rH1 = _mm256_loadu_pd( H+idx + 4 );
rH2 = _mm256_loadu_pd( H+idx + 8 );
rH3 = _mm256_loadu_pd( H+idx + 12);
rZS0 = _mm256_load_pd( zs + idx_zs );
rZS1 = _mm256_load_pd( zs + idx_zs + 4 );
rZS2 = _mm256_load_pd( zs + idx_zs + 8 );
rZS3 = _mm256_load_pd( zs + idx_zs + 12);
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCX),rZS0)));
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCX),rZS1)));
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH2,_mm256_mul_pd(_mm256_mul_pd(rZZ2,rCX),rZS2)));
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH3,_mm256_mul_pd(_mm256_mul_pd(rZZ3,rCX),rZS3)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCY),rZS0)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCY),rZS1)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH2,_mm256_mul_pd(_mm256_mul_pd(rZZ2,rCY),rZS2)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH3,_mm256_mul_pd(_mm256_mul_pd(rZZ3,rCY),rZS3)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ1,rZZ1),rC),rZS1)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH2,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ2,rZZ2),rC),rZS2)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH3,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ3,rZZ3),rC),rZS3)));
#ifdef CALC_ENERGY
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rC),rZS0)));
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rC),rZS1)));
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH2,_mm256_mul_pd(_mm256_mul_pd(rZZ2,rC),rZS2)));
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH3,_mm256_mul_pd(_mm256_mul_pd(rZZ3,rC),rZS3)));
#endif
idx_zs +=16;
idx += incrj + 16;
}
idx += incri;
}
}
_mm256_store_pd(sx,rFX);
_mm256_store_pd(sy,rFY);
_mm256_store_pd(sz,rFZ);
force[m ] = h3*(sx[0]+sx[1]+sx[2]+sx[3]);
force[m+ N] = h3*(sy[0]+sy[1]+sy[2]+sy[3]);
force[m+2*N] = h3*(sz[0]+sz[1]+sz[2]+sz[3]);
#ifdef CALC_ENERGY
	_mm256_store_pd(s,rP);
st->phi[m] = h3*(s[0]+s[1]+s[2]+s[3]);
#endif
}
}
// -----------------------------------------------------------------------------
void SE_FGG_int_split_AVX_P8_force(double* restrict force,
SE_state* st,
const SE_FGG_work* work,
const SE_FGG_params* params)
{
// unpack params
const double* restrict H = work->H;
const double* restrict zs = work->zs;
const double* restrict zx = work->zx;
const double* restrict zy = work->zy;
const double* restrict zz = work->zz;
const double* restrict zfx = work->zfx;
const double* restrict zfy = work->zfy;
const double* restrict zfz = work->zfz;
/* ASSUME P=8 const int p = params->P; */
const int N = params->N;
const double h=params->h;
const double h3=h*h*h;
int i,j,idx,idx_zs;
double sx[4] MEM_ALIGNED;
double sy[4] MEM_ALIGNED;
double sz[4] MEM_ALIGNED;
// hold entire zz vector
__m256d rZZ0, rZZ1;
__m256d rC, rCX, rCY;
__m256d rH0, rH1;
__m256d rZS0, rZS1;
__m256d rZFZ0, rZFZ1;
__m256d rFX, rFY, rFZ;
#ifdef CALC_ENERGY
double s[4] MEM_ALIGNED;
__m256d rP;
#endif
const int incrj = params->npdims[2]-8;
const int incri = params->npdims[2]*(params->npdims[1]-8);
for(int m=0; m<N; m++)
{
idx = work->idx[m];
idx_zs = 0;
rFX = _mm256_setzero_pd();
rFY = _mm256_setzero_pd();
rFZ = _mm256_setzero_pd();
#ifdef CALC_ENERGY
rP = _mm256_setzero_pd();
#endif
/* hoist load of ZZ vector */
rZZ0 = _mm256_load_pd(zz + m*8 );
rZZ1 = _mm256_load_pd(zz + m*8 + 4 );
/* hoist load of ZFZ vector */
rZFZ0 = _mm256_load_pd(zfz + m*8 );
rZFZ1 = _mm256_load_pd(zfz + m*8 + 4 );
if(idx%4==0) // H[idx] is 32-aligned so vectorization simple
{
for(i = 0; i<8; i++)
{
for(j = 0; j<8; j++)
{
double tmp = zx[m*8+i]*zy[m*8 + j];
rC = _mm256_set1_pd( tmp );
rCX = _mm256_set1_pd( tmp * zfx[m*8 + i]);
rCY = _mm256_set1_pd( tmp * zfy[m*8 + j]);
rH0 = _mm256_load_pd( H+idx );
rH1 = _mm256_load_pd( H+idx + 4 );
rZS0 = _mm256_load_pd( zs + idx_zs );
rZS1 = _mm256_load_pd( zs + idx_zs + 4 );
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCX),rZS0)));
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCX),rZS1)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCY),rZS0)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCY),rZS1)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ1,rZZ1),rC),rZS1)));
#ifdef CALC_ENERGY
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rC),rZS0)));
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rC),rZS1)));
#endif
idx_zs +=8;
idx += incrj + 8;
}
idx += incri;
}
}
else // H[idx] not 32-aligned, so use non-aligned loads
{
for(i = 0; i<8; i++)
{
for(j = 0; j<8; j++)
{
double tmp = zx[m*8+i]*zy[m*8 + j];
rC = _mm256_set1_pd( tmp );
rCX = _mm256_set1_pd( tmp * zfx[m*8 + i]);
rCY = _mm256_set1_pd( tmp * zfy[m*8 + j]);
rH0 = _mm256_loadu_pd( H+idx );
rH1 = _mm256_loadu_pd( H+idx + 4 );
rZS0 = _mm256_load_pd( zs + idx_zs );
rZS1 = _mm256_load_pd( zs + idx_zs + 4 );
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCX),rZS0)));
rFX =_mm256_add_pd(rFX,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCX),rZS1)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCY),rZS0)));
rFY =_mm256_add_pd(rFY,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCY),rZS1)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
rFZ =_mm256_add_pd(rFZ,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ1,rZZ1),rC),rZS1)));
#ifdef CALC_ENERGY
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rC),rZS0)));
rP =_mm256_add_pd(rP,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rC),rZS1)));
#endif
idx_zs +=8;
idx += incrj + 8;
}
idx += incri;
}
}
_mm256_store_pd(sx,rFX);
_mm256_store_pd(sy,rFY);
_mm256_store_pd(sz,rFZ);
force[m ] = h3*(sx[0]+sx[1]+sx[2]+sx[3]);
force[m+ N] = h3*(sy[0]+sy[1]+sy[2]+sy[3]);
force[m+2*N] = h3*(sz[0]+sz[1]+sz[2]+sz[3]);
#ifdef CALC_ENERGY
_mm256_store_pd(s,rP);
st->phi[m] = h3*(s[0]+s[1]+s[2]+s[3]);
#endif
}
}
// -----------------------------------------------------------------------------
void SE_FGG_int_split_AVX_u8_force(double* restrict force,
SE_state* st,
const SE_FGG_work* work,
const SE_FGG_params* params)
{
// unpack params
const double* restrict H = work->H;
const double* restrict zs = work->zs;
const double* restrict zx = work->zx;
const double* restrict zy = work->zy;
const double* restrict zz = work->zz;
const double* restrict zfx = work->zfx;
const double* restrict zfy = work->zfy;
const double* restrict zfz = work->zfz;
const int p = params->P;
const int N = params->N;
const double h=params->h;
const double h3=h*h*h;
int i,j,k,idx,idx_zs,idx_zz;
double sx[4] MEM_ALIGNED;
double sy[4] MEM_ALIGNED;
double sz[4] MEM_ALIGNED;
__m256d rH0, rZZ0, rZS0, rZFZ0;
__m256d rH1, rZZ1, rZS1, rZFZ1;
__m256d rFX, rFY, rFZ;
__m256d rC, rCX, rCY;
#ifdef CALC_ENERGY
double s[4] MEM_ALIGNED;
__m256d rP;
#endif
const int incrj = params->npdims[2]-p;
const int incri = params->npdims[2]*(params->npdims[1]-p);
for(int m=0; m<N; m++)
{
idx = work->idx[m];
_mm_prefetch( (void*) (H+idx), _MM_HINT_T0);
idx_zs = 0;
_mm_prefetch( (void*) zs, _MM_HINT_T0);
rFX = _mm256_setzero_pd();
rFY = _mm256_setzero_pd();
rFZ = _mm256_setzero_pd();
#ifdef CALC_ENERGY
rP = _mm256_setzero_pd();
#endif
if(idx%4==0) // H[idx] is 32-byte aligned, so aligned AVX loads can be used
{
for(i = 0; i<p; i++)
{
for(j = 0; j<p; j++)
{
double tmp = zx[m*p+i]*zy[m*p+j];
rC = _mm256_set1_pd( tmp );
rCX = _mm256_set1_pd( tmp * zfx[m*p+i]);
rCY = _mm256_set1_pd( tmp * zfy[m*p+j]);
idx_zz=m*p;
for(k = 0; k<p; k+=8)
{
rH0 = _mm256_load_pd( H+idx );
rH1 = _mm256_load_pd( H+idx + 4);
rZZ0 = _mm256_load_pd( zz + idx_zz );
rZZ1 = _mm256_load_pd( zz + idx_zz + 4);
rZS0 = _mm256_load_pd( zs + idx_zs );
rZS1 = _mm256_load_pd( zs + idx_zs + 4);
rZFZ0 = _mm256_load_pd(zfz+ idx_zz );
rZFZ1 = _mm256_load_pd(zfz+ idx_zz + 4);
rFX = _mm256_add_pd(rFX,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCX),rZS0)));
rFX = _mm256_add_pd(rFX,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCX),rZS1)));
rFY = _mm256_add_pd(rFY,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCY),rZS0)));
rFY = _mm256_add_pd(rFY,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCY),rZS1)));
rFZ = _mm256_add_pd(rFZ,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
rFZ = _mm256_add_pd(rFZ,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ1,rZZ1),rC),rZS1)));
#ifdef CALC_ENERGY
rP = _mm256_add_pd(rP,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rC),rZS0)));
rP = _mm256_add_pd(rP,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rC),rZS1)));
#endif
idx+=8;
idx_zs+=8;
idx_zz+=8;
}
idx += incrj;
}
idx += incri;
}
}
else // H[idx] not 32-aligned, so use non-aligned load from H
{
for(i = 0; i<p; i++)
{
for(j = 0; j<p; j++)
{
double tmp = zx[m*p+i]*zy[m*p+j];
rC = _mm256_set1_pd( tmp );
rCX = _mm256_set1_pd( tmp * zfx[m*p+i]);
rCY = _mm256_set1_pd( tmp * zfy[m*p+j]);
idx_zz=m*p;
for(k = 0; k<p; k+=8)
{
rH0 = _mm256_loadu_pd( H+idx );
rH1 = _mm256_loadu_pd( H+idx + 4);
rZZ0 = _mm256_load_pd( zz + idx_zz );
rZZ1 = _mm256_load_pd( zz + idx_zz + 4);
rZS0 = _mm256_load_pd( zs + idx_zs );
rZS1 = _mm256_load_pd( zs + idx_zs + 4);
rZFZ0 = _mm256_load_pd(zfz+ idx_zz );
rZFZ1 = _mm256_load_pd(zfz+ idx_zz + 4);
rFX = _mm256_add_pd(rFX,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCX),rZS0)));
rFX = _mm256_add_pd(rFX,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCX),rZS1)));
rFY = _mm256_add_pd(rFY,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rCY),rZS0)));
rFY = _mm256_add_pd(rFY,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rCY),rZS1)));
rFZ = _mm256_add_pd(rFZ,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ0,rZZ0),rC),rZS0)));
rFZ = _mm256_add_pd(rFZ,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(_mm256_mul_pd(rZFZ1,rZZ1),rC),rZS1)));
#ifdef CALC_ENERGY
rP = _mm256_add_pd(rP,_mm256_mul_pd(rH0,_mm256_mul_pd(_mm256_mul_pd(rZZ0,rC),rZS0)));
rP = _mm256_add_pd(rP,_mm256_mul_pd(rH1,_mm256_mul_pd(_mm256_mul_pd(rZZ1,rC),rZS1)));
#endif
idx+=8;
idx_zs+=8;
idx_zz+=8;
}
idx += incrj;
}
idx += incri;
}
}
// done accumulating
_mm256_store_pd(sx,rFX);
_mm256_store_pd(sy,rFY);
_mm256_store_pd(sz,rFZ);
force[m ] = h3*(sx[0]+sx[1]+sx[2]+sx[3]);
force[m+ N] = h3*(sy[0]+sy[1]+sy[2]+sy[3]);
force[m+2*N] = h3*(sz[0]+sz[1]+sz[2]+sz[3]);
#ifdef CALC_ENERGY
_mm256_store_pd(s,rP);
st->phi[m] = h3*(s[0]+s[1]+s[2]+s[3]);
#endif
}
}
#endif // AVX
#endif //end FORCE
|
template_vector.h | /* Copyright 2015 The math21 Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#pragma once
#include "inner.h"
namespace math21 {
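// Note: these kernels use 1-based indexing; each raw pointer is shifted on
// entry (x -= 1) so that loops can run from 1 to n inclusive.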
template<typename T>
NumB math21_template_vector_is_equal_cpu(NumN n, const T *x, const T *y, NumR epsilon, NumN logLevel) {
x -= 1;
y -= 1;
NumN id;
//#pragma omp parallel for
for (id = 1; id <= n; ++id) {
NumR tmp = (NumR) y[id] - (NumR) x[id];
if (xjabs(tmp) > epsilon) {
break;
}
}
if (id <= n) {
if (logLevel) {
printf("different from postion %d\n", id);
}
return 0;
}
return 1;
}
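// Minimal usage sketch (hypothetical values; NumB/NumN/NumR are math21's
// bool/index/real scalar typedefs, presumably brought in via inner.h):
//   double a[3] = {1, 2, 3}, b[3] = {1, 2, 3 + 1e-12};
//   NumB same = math21_template_vector_is_equal_cpu(3, a, b, 1e-6, 0);
// The next function returns the p-norm distance ||A - B||_p for p = norm.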
template<typename T>
NumR math21_template_vector_distance(NumN n, const T *A, const T *B, NumR norm) {
MATH21_ASSERT(norm > 0);
A -= 1;
B -= 1;
NumN i;
NumR sum = 0;
if (norm == 1) {
//#pragma omp parallel for
for (i = 1; i <= n; ++i) sum += xjabs(A[i] - B[i]);
} else if (norm == 2) {
//#pragma omp parallel for
for (i = 1; i <= n; ++i) sum += xjsquare(A[i] - B[i]);
sum = xjsqrt(sum);
} else {
for (i = 1; i <= n; ++i) sum += xjpow(xjabs(A[i] - B[i]), norm);
sum = xjpow(sum, 1 / norm);
}
MATH21_ASSERT_FINITE(math21_operator_isfinite(sum))
return sum;
}
template<typename T>
T math21_template_vector_max(NumN n, const T *x, NumN &index) {
x -= 1;
NumN i;
MATH21_ASSERT(n >= 1);
NumN k = 1;
for (i = 1; i <= n; ++i) {
if (x[i] > x[k]) {
k = i;
}
}
index = k;
return x[k];
}
template<typename T>
T math21_template_vector_min(NumN n, const T *x, NumN &index) {
x -= 1;
NumN i;
MATH21_ASSERT(n >= 1);
NumN k = 1;
for (i = 1; i <= n; ++i) {
if (x[i] < x[k]) {
k = i;
}
}
index = k;
return x[k];
}
// see math21_operator_matrix_reverse_y_axis
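// Reverses the last axis of a d1 x d2 x d3 tensor in place: element
// (i1, i2, i3) is swapped with (i1, i2, d3 + 1 - i3), so only d3/2 swaps
// per row are needed (the middle element of an odd d3 stays put).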
template<typename T>
void math21_template_tensor_reverse_axis_3_in_d3_cpu(T *x, NumN d1, NumN d2, NumN d3) {
x -= 1;
NumN n = d1 * d2 * (d3 / 2);
NumN id;
#pragma omp parallel for
for (id = 1; id <= n; ++id) {
NumN i1, i2, i3, ix, iy;
math21_device_index_1d_to_3d_fast(&i1, &i2, &i3, id, d2, d3 / 2);
math21_device_index_3d_to_1d_fast(i1, i2, i3, &ix, d2, d3);
math21_device_index_3d_to_1d_fast(i1, i2, d3 + 1 - i3, &iy, d2, d3);
m21_swap(x[ix], x[iy]);
}
}
} |
omp-low.c | /* Lowering pass for OMP directives. Converts OMP directives into explicit
calls to the runtime library (libgomp), data marshalling to implement data
sharing and copying clauses, offloading to accelerators, and more.
Contributed by Diego Novillo <dnovillo@redhat.com>
Copyright (C) 2005-2018 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "splay-tree.h"
#include "omp-general.h"
#include "omp-low.h"
#include "omp-grid.h"
#include "gimple-low.h"
#include "symbol-summary.h"
#include "tree-nested.h"
#include "context.h"
#include "gomp-constants.h"
#include "gimple-pretty-print.h"
#include "hsa-common.h"
#include "stringpool.h"
#include "attribs.h"
/* Lowering of OMP parallel and workshare constructs proceeds in two
phases. The first phase scans the function looking for OMP statements
and then for variables that must be replaced to satisfy data sharing
clauses. The second phase expands code for the constructs, as well as
re-gimplifying things when variables have been replaced with complex
expressions.
Final code generation is done by pass_expand_omp. The flowgraph is
scanned for regions which are then moved to a new
function, to be invoked by the thread library, or offloaded. */
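/* For example (a sketch, not the exact generated GIMPLE):
#pragma omp parallel shared(x)
x++;
is outlined into a child function such as foo._omp_fn.0 taking a single
.omp_data_i pointer and incrementing *data->x, while the original call
site packs x's address into an .omp_data_s record and invokes the libgomp
runtime (GOMP_parallel). */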
/* Context structure. Used to store information about each parallel
directive in the code. */
struct omp_context
{
/* This field must be at the beginning, as we do "inheritance": Some
callback functions for tree-inline.c (e.g., omp_copy_decl)
receive a copy_body_data pointer that is up-casted to an
omp_context pointer. */
copy_body_data cb;
/* The tree of contexts corresponding to the encountered constructs. */
struct omp_context *outer;
gimple *stmt;
/* Map variables to fields in a structure that allows communication
between sending and receiving threads. */
splay_tree field_map;
tree record_type;
tree sender_decl;
tree receiver_decl;
/* These are used just by task contexts, if task firstprivate fn is
needed. srecord_type is used to communicate from the thread
that encountered the task construct to task firstprivate fn,
record_type is allocated by GOMP_task, initialized by task firstprivate
fn and passed to the task body fn. */
splay_tree sfield_map;
tree srecord_type;
/* A chain of variables to add to the top-level block surrounding the
construct. In the case of a parallel, this is in the child function. */
tree block_vars;
/* Label to which GOMP_cancel{,llation_point} and explicit and implicit
barriers should jump during the omplower pass. */
tree cancel_label;
/* The sibling GIMPLE_OMP_FOR simd with _simt_ clause or NULL
otherwise. */
gimple *simt_stmt;
/* Nesting depth of this context. Used to beautify error messages re
invalid gotos. The outermost ctx is depth 1, with depth 0 being
reserved for the main body of the function. */
int depth;
/* True if this parallel directive is nested within another. */
bool is_nested;
/* True if this construct can be cancelled. */
bool cancellable;
};
static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static bitmap task_shared_vars;
static vec<omp_context *> taskreg_contexts;
static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
#define WALK_SUBSTMTS \
case GIMPLE_BIND: \
case GIMPLE_TRY: \
case GIMPLE_CATCH: \
case GIMPLE_EH_FILTER: \
case GIMPLE_TRANSACTION: \
/* The sub-statements for these should be walked. */ \
*handled_ops_p = false; \
break;
/* Return true if CTX corresponds to an oacc parallel region. */
static bool
is_oacc_parallel (omp_context *ctx)
{
enum gimple_code outer_type = gimple_code (ctx->stmt);
return ((outer_type == GIMPLE_OMP_TARGET)
&& (gimple_omp_target_kind (ctx->stmt)
== GF_OMP_TARGET_KIND_OACC_PARALLEL));
}
/* Return true if CTX corresponds to an oacc kernels region. */
static bool
is_oacc_kernels (omp_context *ctx)
{
enum gimple_code outer_type = gimple_code (ctx->stmt);
return ((outer_type == GIMPLE_OMP_TARGET)
&& (gimple_omp_target_kind (ctx->stmt)
== GF_OMP_TARGET_KIND_OACC_KERNELS));
}
/* If DECL is the artificial dummy VAR_DECL created for non-static
data member privatization, return the underlying "this" parameter,
otherwise return NULL. */
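/* E.g. for a C++ non-static member used in a data-sharing clause, the
frontend creates a dummy VAR_DECL whose DECL_VALUE_EXPR is this->member;
the loop below walks down the COMPONENT_REF chain to recover the
artificial "this" PARM_DECL. */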
tree
omp_member_access_dummy_var (tree decl)
{
if (!VAR_P (decl)
|| !DECL_ARTIFICIAL (decl)
|| !DECL_IGNORED_P (decl)
|| !DECL_HAS_VALUE_EXPR_P (decl)
|| !lang_hooks.decls.omp_disregard_value_expr (decl, false))
return NULL_TREE;
tree v = DECL_VALUE_EXPR (decl);
if (TREE_CODE (v) != COMPONENT_REF)
return NULL_TREE;
while (1)
switch (TREE_CODE (v))
{
case COMPONENT_REF:
case MEM_REF:
case INDIRECT_REF:
CASE_CONVERT:
case POINTER_PLUS_EXPR:
v = TREE_OPERAND (v, 0);
continue;
case PARM_DECL:
if (DECL_CONTEXT (v) == current_function_decl
&& DECL_ARTIFICIAL (v)
&& TREE_CODE (TREE_TYPE (v)) == POINTER_TYPE)
return v;
return NULL_TREE;
default:
return NULL_TREE;
}
}
/* Helper for unshare_and_remap, called through walk_tree. */
static tree
unshare_and_remap_1 (tree *tp, int *walk_subtrees, void *data)
{
tree *pair = (tree *) data;
if (*tp == pair[0])
{
*tp = unshare_expr (pair[1]);
*walk_subtrees = 0;
}
else if (IS_TYPE_OR_DECL_P (*tp))
*walk_subtrees = 0;
return NULL_TREE;
}
/* Return unshare_expr (X) with all occurrences of FROM
replaced with TO. */
static tree
unshare_and_remap (tree x, tree from, tree to)
{
tree pair[2] = { from, to };
x = unshare_expr (x);
walk_tree (&x, unshare_and_remap_1, pair, NULL);
return x;
}
/* Convenience function for calling scan_omp_1_op on tree operands. */
static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.info = ctx;
wi.want_locations = true;
return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Return true if CTX is for an omp parallel. */
static inline bool
is_parallel_ctx (omp_context *ctx)
{
return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}
/* Return true if CTX is for an omp task. */
static inline bool
is_task_ctx (omp_context *ctx)
{
return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}
/* Return true if CTX is for an omp taskloop. */
static inline bool
is_taskloop_ctx (omp_context *ctx)
{
return gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP;
}
/* Return true if CTX is for an omp parallel or omp task. */
static inline bool
is_taskreg_ctx (omp_context *ctx)
{
return is_parallel_ctx (ctx) || is_task_ctx (ctx);
}
/* Return true if EXPR is variable sized. */
static inline bool
is_variable_sized (const_tree expr)
{
return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}
/* Lookup variables. The "maybe" forms allow the variable not to have
been entered; otherwise we assert that the variable must have been
entered. */
static inline tree
lookup_decl (tree var, omp_context *ctx)
{
tree *n = ctx->cb.decl_map->get (var);
return *n;
}
static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
return n ? *n : NULL_TREE;
}
static inline tree
lookup_field (tree var, omp_context *ctx)
{
splay_tree_node n;
n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
return (tree) n->value;
}
static inline tree
lookup_sfield (splay_tree_key key, omp_context *ctx)
{
splay_tree_node n;
n = splay_tree_lookup (ctx->sfield_map
? ctx->sfield_map : ctx->field_map, key);
return (tree) n->value;
}
static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
return lookup_sfield ((splay_tree_key) var, ctx);
}
static inline tree
maybe_lookup_field (splay_tree_key key, omp_context *ctx)
{
splay_tree_node n;
n = splay_tree_lookup (ctx->field_map, key);
return n ? (tree) n->value : NULL_TREE;
}
static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
return maybe_lookup_field ((splay_tree_key) var, ctx);
}
/* Return true if DECL should be copied by pointer. SHARED_CTX is
the parallel context if DECL is to be shared. */
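/* Roughly: a non-addressable scalar whose value is provably not reachable
from an outer scope can use copy-in/copy-out; aggregates, atomics,
addressable or escaping variables, and most things shared with a task
must be passed by pointer instead. */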
static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
if (AGGREGATE_TYPE_P (TREE_TYPE (decl))
|| TYPE_ATOMIC (TREE_TYPE (decl)))
return true;
/* We can only use copy-in/copy-out semantics for shared variables
when we know the value is not accessible from an outer scope. */
if (shared_ctx)
{
gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));
/* ??? Trivially accessible from anywhere. But why would we even
be passing an address in this case? Should we simply assert
this to be false, or should we have a cleanup pass that removes
these from the list of mappings? */
if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
return true;
/* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
without analyzing the expression whether or not its location
is accessible to anyone else. In the case of nested parallel
regions it certainly may be. */
if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
return true;
/* Do not use copy-in/copy-out for variables that have their
address taken. */
if (TREE_ADDRESSABLE (decl))
return true;
/* lower_send_shared_vars only uses copy-in, but not copy-out
for these. */
if (TREE_READONLY (decl)
|| ((TREE_CODE (decl) == RESULT_DECL
|| TREE_CODE (decl) == PARM_DECL)
&& DECL_BY_REFERENCE (decl)))
return false;
/* Disallow copy-in/out in nested parallel if
decl is shared in outer parallel, otherwise
each thread could store the shared variable
in its own copy-in location, making the
variable no longer really shared. */
if (shared_ctx->is_nested)
{
omp_context *up;
for (up = shared_ctx->outer; up; up = up->outer)
if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
break;
if (up)
{
tree c;
for (c = gimple_omp_taskreg_clauses (up->stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
&& OMP_CLAUSE_DECL (c) == decl)
break;
if (c)
goto maybe_mark_addressable_and_ret;
}
}
/* For tasks avoid using copy-in/out. As tasks can be
deferred or executed in a different thread, when GOMP_task
returns, the task hasn't necessarily terminated. */
if (is_task_ctx (shared_ctx))
{
tree outer;
maybe_mark_addressable_and_ret:
outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
if (is_gimple_reg (outer) && !omp_member_access_dummy_var (outer))
{
/* Taking address of OUTER in lower_send_shared_vars
might need regimplification of everything that uses the
variable. */
if (!task_shared_vars)
task_shared_vars = BITMAP_ALLOC (NULL);
bitmap_set_bit (task_shared_vars, DECL_UID (outer));
TREE_ADDRESSABLE (outer) = 1;
}
return true;
}
}
return false;
}
/* Construct a new automatic decl similar to VAR. */
static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
tree copy = copy_var_decl (var, name, type);
DECL_CONTEXT (copy) = current_function_decl;
DECL_CHAIN (copy) = ctx->block_vars;
/* If VAR is listed in task_shared_vars, it means it wasn't
originally addressable and was only made so because the task needs
to take its address. But we don't need to take the address of
privatized copies of that var. */
if (TREE_ADDRESSABLE (var)
&& task_shared_vars
&& bitmap_bit_p (task_shared_vars, DECL_UID (var)))
TREE_ADDRESSABLE (copy) = 0;
ctx->block_vars = copy;
return copy;
}
static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
as appropriate. */
static tree
omp_build_component_ref (tree obj, tree field)
{
tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
if (TREE_THIS_VOLATILE (field))
TREE_THIS_VOLATILE (ret) |= 1;
if (TREE_READONLY (field))
TREE_READONLY (ret) |= 1;
return ret;
}
/* Build tree nodes to access the field for VAR on the receiver side. */
static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
tree x, field = lookup_field (var, ctx);
/* If the receiver record type was remapped in the child function,
remap the field into the new record type. */
x = maybe_lookup_field (field, ctx);
if (x != NULL)
field = x;
x = build_simple_mem_ref (ctx->receiver_decl);
TREE_THIS_NOTRAP (x) = 1;
x = omp_build_component_ref (x, field);
if (by_ref)
{
x = build_simple_mem_ref (x);
TREE_THIS_NOTRAP (x) = 1;
}
return x;
}
/* Build tree nodes to access VAR in the scope outer to CTX. In the case
of a parallel, this is a component reference; for workshare constructs
this is some variable. */
static tree
build_outer_var_ref (tree var, omp_context *ctx,
enum omp_clause_code code = OMP_CLAUSE_ERROR)
{
tree x;
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
x = var;
else if (is_variable_sized (var))
{
x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
x = build_outer_var_ref (x, ctx, code);
x = build_simple_mem_ref (x);
}
else if (is_taskreg_ctx (ctx))
{
bool by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
}
else if ((gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
|| (code == OMP_CLAUSE_PRIVATE
&& (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
|| gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS
|| gimple_code (ctx->stmt) == GIMPLE_OMP_SINGLE)))
{
/* #pragma omp simd isn't a worksharing construct, and can reference
even private vars in its linear etc. clauses.
Similarly for OMP_CLAUSE_PRIVATE with outer ref, that can refer
to private vars in all worksharing constructs. */
x = NULL_TREE;
if (ctx->outer && is_taskreg_ctx (ctx))
x = lookup_decl (var, ctx->outer);
else if (ctx->outer)
x = maybe_lookup_decl_in_outer_ctx (var, ctx);
if (x == NULL_TREE)
x = var;
}
else if (code == OMP_CLAUSE_LASTPRIVATE && is_taskloop_ctx (ctx))
{
gcc_assert (ctx->outer);
splay_tree_node n
= splay_tree_lookup (ctx->outer->field_map,
(splay_tree_key) &DECL_UID (var));
if (n == NULL)
{
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx->outer)))
x = var;
else
x = lookup_decl (var, ctx->outer);
}
else
{
tree field = (tree) n->value;
/* If the receiver record type was remapped in the child function,
remap the field into the new record type. */
x = maybe_lookup_field (field, ctx->outer);
if (x != NULL)
field = x;
x = build_simple_mem_ref (ctx->outer->receiver_decl);
x = omp_build_component_ref (x, field);
if (use_pointer_for_field (var, ctx->outer))
x = build_simple_mem_ref (x);
}
}
else if (ctx->outer)
{
omp_context *outer = ctx->outer;
if (gimple_code (outer->stmt) == GIMPLE_OMP_GRID_BODY)
{
outer = outer->outer;
gcc_assert (outer
&& gimple_code (outer->stmt) != GIMPLE_OMP_GRID_BODY);
}
x = lookup_decl (var, outer);
}
else if (omp_is_reference (var))
/* This can happen with orphaned constructs. If var is reference, it is
possible it is shared and as such valid. */
x = var;
else if (omp_member_access_dummy_var (var))
x = var;
else
gcc_unreachable ();
if (x == var)
{
tree t = omp_member_access_dummy_var (var);
if (t)
{
x = DECL_VALUE_EXPR (var);
tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
if (o != t)
x = unshare_and_remap (x, t, o);
else
x = unshare_expr (x);
}
}
if (omp_is_reference (var))
x = build_simple_mem_ref (x);
return x;
}
/* Build tree nodes to access the field for VAR on the sender side. */
static tree
build_sender_ref (splay_tree_key key, omp_context *ctx)
{
tree field = lookup_sfield (key, ctx);
return omp_build_component_ref (ctx->sender_decl, field);
}
static tree
build_sender_ref (tree var, omp_context *ctx)
{
return build_sender_ref ((splay_tree_key) var, ctx);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL. If
BASE_POINTERS_RESTRICT, declare the field with restrict. */
static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx,
bool base_pointers_restrict = false)
{
tree field, type, sfield = NULL_TREE;
splay_tree_key key = (splay_tree_key) var;
if ((mask & 8) != 0)
{
key = (splay_tree_key) &DECL_UID (var);
gcc_checking_assert (key != (splay_tree_key) var);
}
gcc_assert ((mask & 1) == 0
|| !splay_tree_lookup (ctx->field_map, key));
gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
|| !splay_tree_lookup (ctx->sfield_map, key));
gcc_assert ((mask & 3) == 3
|| !is_gimple_omp_oacc (ctx->stmt));
type = TREE_TYPE (var);
/* Prevent redeclaring the var in the split-off function with a restrict
pointer type. Note that we only clear type itself, restrict qualifiers in
the pointed-to type will be ignored by points-to analysis. */
if (POINTER_TYPE_P (type)
&& TYPE_RESTRICT (type))
type = build_qualified_type (type, TYPE_QUALS (type) & ~TYPE_QUAL_RESTRICT);
if (mask & 4)
{
gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
type = build_pointer_type (build_pointer_type (type));
}
else if (by_ref)
{
type = build_pointer_type (type);
if (base_pointers_restrict)
type = build_qualified_type (type, TYPE_QUAL_RESTRICT);
}
else if ((mask & 3) == 1 && omp_is_reference (var))
type = TREE_TYPE (type);
field = build_decl (DECL_SOURCE_LOCATION (var),
FIELD_DECL, DECL_NAME (var), type);
/* Remember what variable this field was created for. This does have a
side effect of making dwarf2out ignore this member, so for helpful
debugging we clear it later in delete_omp_context. */
DECL_ABSTRACT_ORIGIN (field) = var;
if (type == TREE_TYPE (var))
{
SET_DECL_ALIGN (field, DECL_ALIGN (var));
DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
}
else
SET_DECL_ALIGN (field, TYPE_ALIGN (type));
if ((mask & 3) == 3)
{
insert_field_into_struct (ctx->record_type, field);
if (ctx->srecord_type)
{
sfield = build_decl (DECL_SOURCE_LOCATION (var),
FIELD_DECL, DECL_NAME (var), type);
DECL_ABSTRACT_ORIGIN (sfield) = var;
SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
insert_field_into_struct (ctx->srecord_type, sfield);
}
}
else
{
if (ctx->srecord_type == NULL_TREE)
{
tree t;
ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
{
sfield = build_decl (DECL_SOURCE_LOCATION (t),
FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
insert_field_into_struct (ctx->srecord_type, sfield);
splay_tree_insert (ctx->sfield_map,
(splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
(splay_tree_value) sfield);
}
}
sfield = field;
insert_field_into_struct ((mask & 1) ? ctx->record_type
: ctx->srecord_type, field);
}
if (mask & 1)
splay_tree_insert (ctx->field_map, key, (splay_tree_value) field);
if ((mask & 2) && ctx->sfield_map)
splay_tree_insert (ctx->sfield_map, key, (splay_tree_value) sfield);
}
static tree
install_var_local (tree var, omp_context *ctx)
{
tree new_var = omp_copy_decl_1 (var, ctx);
insert_decl_map (&ctx->cb, var, new_var);
return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context. This means
copying the DECL_VALUE_EXPR, and fixing up the type. */
static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
tree new_decl, size;
new_decl = lookup_decl (decl, ctx);
TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
&& DECL_HAS_VALUE_EXPR_P (decl))
{
tree ve = DECL_VALUE_EXPR (decl);
walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
SET_DECL_VALUE_EXPR (new_decl, ve);
DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
}
if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
{
size = remap_decl (DECL_SIZE (decl), &ctx->cb);
if (size == error_mark_node)
size = TYPE_SIZE (TREE_TYPE (new_decl));
DECL_SIZE (new_decl) = size;
size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
if (size == error_mark_node)
size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
DECL_SIZE_UNIT (new_decl) = size;
}
}
/* The callback for remap_decl. Search all containing contexts for a
mapping of the variable; this avoids having to duplicate the splay
tree ahead of time. We know a mapping doesn't already exist in the
given context. Create new mappings to implement default semantics. */
static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
omp_context *ctx = (omp_context *) cb;
tree new_var;
if (TREE_CODE (var) == LABEL_DECL)
{
if (FORCED_LABEL (var) || DECL_NONLOCAL (var))
return var;
new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
DECL_CONTEXT (new_var) = current_function_decl;
insert_decl_map (&ctx->cb, var, new_var);
return new_var;
}
while (!is_taskreg_ctx (ctx))
{
ctx = ctx->outer;
if (ctx == NULL)
return var;
new_var = maybe_lookup_decl (var, ctx);
if (new_var)
return new_var;
}
if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
return var;
return error_mark_node;
}
/* Create a new context, with OUTER_CTX being the surrounding context. */
static omp_context *
new_omp_context (gimple *stmt, omp_context *outer_ctx)
{
omp_context *ctx = XCNEW (omp_context);
splay_tree_insert (all_contexts, (splay_tree_key) stmt,
(splay_tree_value) ctx);
ctx->stmt = stmt;
if (outer_ctx)
{
ctx->outer = outer_ctx;
ctx->cb = outer_ctx->cb;
ctx->cb.block = NULL;
ctx->depth = outer_ctx->depth + 1;
}
else
{
ctx->cb.src_fn = current_function_decl;
ctx->cb.dst_fn = current_function_decl;
ctx->cb.src_node = cgraph_node::get (current_function_decl);
gcc_checking_assert (ctx->cb.src_node);
ctx->cb.dst_node = ctx->cb.src_node;
ctx->cb.src_cfun = cfun;
ctx->cb.copy_decl = omp_copy_decl;
ctx->cb.eh_lp_nr = 0;
ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
ctx->depth = 1;
}
ctx->cb.decl_map = new hash_map<tree, tree>;
return ctx;
}
static gimple_seq maybe_catch_exception (gimple_seq);
/* Finalize task copyfn. */
static void
finalize_task_copyfn (gomp_task *task_stmt)
{
struct function *child_cfun;
tree child_fn;
gimple_seq seq = NULL, new_seq;
gbind *bind;
child_fn = gimple_omp_task_copy_fn (task_stmt);
if (child_fn == NULL_TREE)
return;
child_cfun = DECL_STRUCT_FUNCTION (child_fn);
DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
push_cfun (child_cfun);
bind = gimplify_body (child_fn, false);
gimple_seq_add_stmt (&seq, bind);
new_seq = maybe_catch_exception (seq);
if (new_seq != seq)
{
bind = gimple_build_bind (NULL, new_seq, NULL);
seq = NULL;
gimple_seq_add_stmt (&seq, bind);
}
gimple_set_body (child_fn, seq);
pop_cfun ();
/* Inform the callgraph about the new function. */
cgraph_node *node = cgraph_node::get_create (child_fn);
node->parallelized_function = 1;
cgraph_node::add_new_function (child_fn, false);
}
/* Destroy an omp_context data structure. Called through the splay tree
value delete callback. */
static void
delete_omp_context (splay_tree_value value)
{
omp_context *ctx = (omp_context *) value;
delete ctx->cb.decl_map;
if (ctx->field_map)
splay_tree_delete (ctx->field_map);
if (ctx->sfield_map)
splay_tree_delete (ctx->sfield_map);
/* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
it produces corrupt debug information. */
if (ctx->record_type)
{
tree t;
for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
DECL_ABSTRACT_ORIGIN (t) = NULL;
}
if (ctx->srecord_type)
{
tree t;
for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
DECL_ABSTRACT_ORIGIN (t) = NULL;
}
if (is_task_ctx (ctx))
finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));
XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
context. */
static void
fixup_child_record_type (omp_context *ctx)
{
tree f, type = ctx->record_type;
if (!ctx->receiver_decl)
return;
/* ??? It isn't sufficient to just call remap_type here, because
variably_modified_type_p doesn't work the way we expect for
record types. Testing each field for whether it needs remapping
and creating a new record by hand works, however. */
for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
break;
if (f)
{
tree name, new_fields = NULL;
type = lang_hooks.types.make_type (RECORD_TYPE);
name = DECL_NAME (TYPE_NAME (ctx->record_type));
name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
TYPE_DECL, name, type);
TYPE_NAME (type) = name;
for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
{
tree new_f = copy_node (f);
DECL_CONTEXT (new_f) = type;
TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
DECL_CHAIN (new_f) = new_fields;
walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
&ctx->cb, NULL);
walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
&ctx->cb, NULL);
new_fields = new_f;
/* Arrange to be able to look up the receiver field
given the sender field. */
splay_tree_insert (ctx->field_map, (splay_tree_key) f,
(splay_tree_value) new_f);
}
TYPE_FIELDS (type) = nreverse (new_fields);
layout_type (type);
}
/* In a target region we never modify any of the pointers in *.omp_data_i,
so attempt to help the optimizers. */
if (is_gimple_omp_offloaded (ctx->stmt))
type = build_qualified_type (type, TYPE_QUAL_CONST);
TREE_TYPE (ctx->receiver_decl)
= build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
specified by CLAUSES. If BASE_POINTERS_RESTRICT, install var field with
restrict. */
static void
scan_sharing_clauses (tree clauses, omp_context *ctx,
bool base_pointers_restrict = false)
{
tree c, decl;
bool scan_array_reductions = false;
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
bool by_ref;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
goto do_private;
else if (!is_variable_sized (decl))
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_SHARED:
decl = OMP_CLAUSE_DECL (c);
/* Ignore shared directives in teams construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
{
/* Global variables don't need to be copied,
the receiver side will use them directly. */
tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
if (is_global_var (odecl))
break;
insert_decl_map (&ctx->cb, decl, odecl);
break;
}
gcc_assert (is_taskreg_ctx (ctx));
gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
|| !is_variable_sized (decl));
/* Global variables don't need to be copied,
the receiver side will use them directly. */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
break;
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
{
use_pointer_for_field (decl, ctx);
break;
}
by_ref = use_pointer_for_field (decl, NULL);
if ((! TREE_READONLY (decl) && !OMP_CLAUSE_SHARED_READONLY (c))
|| TREE_ADDRESSABLE (decl)
|| by_ref
|| omp_is_reference (decl))
{
by_ref = use_pointer_for_field (decl, ctx);
install_var_field (decl, by_ref, 3, ctx);
install_var_local (decl, ctx);
break;
}
/* We don't need to copy const scalar vars back. */
OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
goto do_private;
case OMP_CLAUSE_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& TREE_CODE (decl) == MEM_REF)
{
tree t = TREE_OPERAND (decl, 0);
if (TREE_CODE (t) == POINTER_PLUS_EXPR)
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == INDIRECT_REF
|| TREE_CODE (t) == ADDR_EXPR)
t = TREE_OPERAND (t, 0);
install_var_local (t, ctx);
if (is_taskreg_ctx (ctx)
&& !is_global_var (maybe_lookup_decl_in_outer_ctx (t, ctx))
&& !is_variable_sized (t))
{
by_ref = use_pointer_for_field (t, ctx);
install_var_field (t, by_ref, 3, ctx);
}
break;
}
goto do_private;
case OMP_CLAUSE_LASTPRIVATE:
/* Let the corresponding firstprivate clause create
the variable. */
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LINEAR:
decl = OMP_CLAUSE_DECL (c);
do_private:
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
&& is_gimple_omp_offloaded (ctx->stmt))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
install_var_field (decl, !omp_is_reference (decl), 3, ctx);
else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 3, ctx);
else
install_var_field (decl, false, 3, ctx);
}
if (is_variable_sized (decl))
{
if (is_task_ctx (ctx))
install_var_field (decl, false, 1, ctx);
break;
}
else if (is_taskreg_ctx (ctx))
{
bool global
= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
by_ref = use_pointer_for_field (decl, NULL);
if (is_task_ctx (ctx)
&& (global || by_ref || omp_is_reference (decl)))
{
install_var_field (decl, false, 1, ctx);
if (!global)
install_var_field (decl, by_ref, 2, ctx);
}
else if (!global)
install_var_field (decl, by_ref, 3, ctx);
}
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 3, ctx);
else
install_var_field (decl, false, 3, ctx);
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
}
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_IS_DEVICE_PTR:
decl = OMP_CLAUSE_DECL (c);
goto do_private;
case OMP_CLAUSE__LOOPTEMP_:
gcc_assert (is_taskreg_ctx (ctx));
decl = OMP_CLAUSE_DECL (c);
install_var_field (decl, false, 3, ctx);
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
decl = OMP_CLAUSE_DECL (c);
by_ref = use_pointer_for_field (decl, NULL);
install_var_field (decl, by_ref, 3, ctx);
break;
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
break;
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE_MAP:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
decl = OMP_CLAUSE_DECL (c);
/* Global variables with the "omp declare target" attribute
don't need to be copied, the receiver side will use them
directly. However, global variables with the "omp declare target link"
attribute do need to be copied, as do maps with an ALWAYS modifier. */
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& DECL_P (decl)
&& ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_REFERENCE))
|| TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_TO
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_FROM
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_TOFROM
&& is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
&& varpool_node::get_create (decl)->offloadable
&& !lookup_attribute ("omp declare target link",
DECL_ATTRIBUTES (decl)))
break;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
{
/* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
not offloaded; there is nothing to map for those. */
if (!is_gimple_omp_offloaded (ctx->stmt)
&& !POINTER_TYPE_P (TREE_TYPE (decl))
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
{
if (TREE_CODE (decl) == COMPONENT_REF
|| (TREE_CODE (decl) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
== REFERENCE_TYPE)))
break;
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
}
install_var_local (decl, ctx);
break;
}
if (DECL_P (decl))
{
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_field (decl2, true, 3, ctx);
install_var_local (decl2, ctx);
install_var_local (decl, ctx);
}
else
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 7, ctx);
else
install_var_field (decl, true, 3, ctx,
base_pointers_restrict);
if (is_gimple_omp_offloaded (ctx->stmt)
&& !OMP_CLAUSE_MAP_IN_REDUCTION (c))
install_var_local (decl, ctx);
}
}
else
{
tree base = get_base_address (decl);
tree nc = OMP_CLAUSE_CHAIN (c);
if (DECL_P (base)
&& nc != NULL_TREE
&& OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_DECL (nc) == base
&& OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
&& integer_zerop (OMP_CLAUSE_SIZE (nc)))
{
OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
}
else
{
if (ctx->outer)
{
scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
decl = OMP_CLAUSE_DECL (c);
}
gcc_assert (!splay_tree_lookup (ctx->field_map,
(splay_tree_key) decl));
tree field
= build_decl (OMP_CLAUSE_LOCATION (c),
FIELD_DECL, NULL_TREE, ptr_type_node);
SET_DECL_ALIGN (field, TYPE_ALIGN (ptr_type_node));
insert_field_into_struct (ctx->record_type, field);
splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
(splay_tree_value) field);
}
}
break;
case OMP_CLAUSE__GRIDDIM_:
if (ctx->outer)
{
scan_omp_op (&OMP_CLAUSE__GRIDDIM__SIZE (c), ctx->outer);
scan_omp_op (&OMP_CLAUSE__GRIDDIM__GROUP (c), ctx->outer);
}
break;
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE__SIMT_:
case OMP_CLAUSE_DEFAULT:
break;
case OMP_CLAUSE_ALIGNED:
decl = OMP_CLAUSE_DECL (c);
if (is_global_var (decl)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_local (decl, ctx);
break;
case OMP_CLAUSE__CACHE_:
default:
gcc_unreachable ();
}
}
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_LASTPRIVATE:
/* Let the corresponding firstprivate clause create
the variable. */
if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
scan_array_reductions = true;
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_LINEAR:
case OMP_CLAUSE_IS_DEVICE_PTR:
decl = OMP_CLAUSE_DECL (c);
if (is_variable_sized (decl))
{
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
&& is_gimple_omp_offloaded (ctx->stmt))
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
fixup_remapped_decl (decl2, ctx, false);
}
install_var_local (decl, ctx);
}
fixup_remapped_decl (decl, ctx,
OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
&& OMP_CLAUSE_PRIVATE_DEBUG (c));
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
scan_array_reductions = true;
break;
case OMP_CLAUSE_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (decl) != MEM_REF)
{
if (is_variable_sized (decl))
install_var_local (decl, ctx);
fixup_remapped_decl (decl, ctx, false);
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
scan_array_reductions = true;
break;
case OMP_CLAUSE_SHARED:
/* Ignore shared directives in teams construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
break;
decl = OMP_CLAUSE_DECL (c);
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
break;
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
{
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
ctx->outer)))
break;
bool by_ref = use_pointer_for_field (decl, ctx);
install_var_field (decl, by_ref, 11, ctx);
break;
}
fixup_remapped_decl (decl, ctx, false);
break;
case OMP_CLAUSE_MAP:
if (!is_gimple_omp_offloaded (ctx->stmt))
break;
decl = OMP_CLAUSE_DECL (c);
if (DECL_P (decl)
&& ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_REFERENCE))
|| TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
&& is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
&& varpool_node::get_create (decl)->offloadable)
break;
if (DECL_P (decl))
{
if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
&& !COMPLETE_TYPE_P (TREE_TYPE (decl)))
{
tree new_decl = lookup_decl (decl, ctx);
TREE_TYPE (new_decl)
= remap_type (TREE_TYPE (decl), &ctx->cb);
}
else if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
fixup_remapped_decl (decl2, ctx, false);
fixup_remapped_decl (decl, ctx, true);
}
else
fixup_remapped_decl (decl, ctx, false);
}
break;
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE__GRIDDIM_:
case OMP_CLAUSE__SIMT_:
break;
case OMP_CLAUSE__CACHE_:
default:
gcc_unreachable ();
}
}
gcc_checking_assert (!scan_array_reductions
|| !is_gimple_omp_oacc (ctx->stmt));
if (scan_array_reductions)
{
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}
}
/* Create a new name for the omp child function. Returns an identifier. */
static tree
create_omp_child_function_name (bool task_copy)
{
return clone_function_name (current_function_decl,
task_copy ? "_omp_cpyfn" : "_omp_fn");
}
/* Return true if CTX may belong to offloaded code: either if current function
is offloaded, or any enclosing context corresponds to a target region. */
static bool
omp_maybe_offloaded_ctx (omp_context *ctx)
{
if (cgraph_node::get (current_function_decl)->offloadable)
return true;
for (; ctx; ctx = ctx->outer)
if (is_gimple_omp_offloaded (ctx->stmt))
return true;
return false;
}
/* Build a decl for the omp child function. It'll not contain a body
yet, just the bare decl. */
static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
tree decl, type, name, t;
name = create_omp_child_function_name (task_copy);
if (task_copy)
type = build_function_type_list (void_type_node, ptr_type_node,
ptr_type_node, NULL_TREE);
else
type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
|| !task_copy);
if (!task_copy)
ctx->cb.dst_fn = decl;
else
gimple_omp_task_set_copy_fn (ctx->stmt, decl);
TREE_STATIC (decl) = 1;
TREE_USED (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 0;
TREE_PUBLIC (decl) = 0;
DECL_UNINLINABLE (decl) = 1;
DECL_EXTERNAL (decl) = 0;
DECL_CONTEXT (decl) = NULL_TREE;
DECL_INITIAL (decl) = make_node (BLOCK);
BLOCK_SUPERCONTEXT (DECL_INITIAL (decl)) = decl;
DECL_ATTRIBUTES (decl) = DECL_ATTRIBUTES (current_function_decl);
/* Remove omp declare simd attribute from the new attributes. */
if (tree a = lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (decl)))
{
while (tree a2 = lookup_attribute ("omp declare simd", TREE_CHAIN (a)))
a = a2;
a = TREE_CHAIN (a);
for (tree *p = &DECL_ATTRIBUTES (decl); *p != a;)
if (is_attribute_p ("omp declare simd", get_attribute_name (*p)))
*p = TREE_CHAIN (*p);
else
{
tree chain = TREE_CHAIN (*p);
*p = copy_node (*p);
p = &TREE_CHAIN (*p);
*p = chain;
}
}
DECL_FUNCTION_SPECIFIC_OPTIMIZATION (decl)
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (current_function_decl);
DECL_FUNCTION_SPECIFIC_TARGET (decl)
= DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl);
DECL_FUNCTION_VERSIONED (decl)
= DECL_FUNCTION_VERSIONED (current_function_decl);
if (omp_maybe_offloaded_ctx (ctx))
{
cgraph_node::get_create (decl)->offloadable = 1;
if (ENABLE_OFFLOADING)
g->have_offload = true;
}
if (cgraph_node::get_create (decl)->offloadable
&& !lookup_attribute ("omp declare target",
DECL_ATTRIBUTES (current_function_decl)))
{
const char *target_attr = (is_gimple_omp_offloaded (ctx->stmt)
? "omp target entrypoint"
: "omp declare target");
DECL_ATTRIBUTES (decl)
= tree_cons (get_identifier (target_attr),
NULL_TREE, DECL_ATTRIBUTES (decl));
}
t = build_decl (DECL_SOURCE_LOCATION (decl),
RESULT_DECL, NULL_TREE, void_type_node);
DECL_ARTIFICIAL (t) = 1;
DECL_IGNORED_P (t) = 1;
DECL_CONTEXT (t) = decl;
DECL_RESULT (decl) = t;
tree data_name = get_identifier (".omp_data_i");
t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
ptr_type_node);
DECL_ARTIFICIAL (t) = 1;
DECL_NAMELESS (t) = 1;
DECL_ARG_TYPE (t) = ptr_type_node;
DECL_CONTEXT (t) = current_function_decl;
TREE_USED (t) = 1;
TREE_READONLY (t) = 1;
DECL_ARGUMENTS (decl) = t;
if (!task_copy)
ctx->receiver_decl = t;
else
{
t = build_decl (DECL_SOURCE_LOCATION (decl),
PARM_DECL, get_identifier (".omp_data_o"),
ptr_type_node);
DECL_ARTIFICIAL (t) = 1;
DECL_NAMELESS (t) = 1;
DECL_ARG_TYPE (t) = ptr_type_node;
DECL_CONTEXT (t) = current_function_decl;
TREE_USED (t) = 1;
TREE_ADDRESSABLE (t) = 1;
DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
DECL_ARGUMENTS (decl) = t;
}
/* Allocate memory for the function structure. The call to
allocate_struct_function clobbers CFUN, so we need to restore
it afterward. */
push_struct_function (decl);
cfun->function_end_locus = gimple_location (ctx->stmt);
init_tree_ssa (cfun);
pop_cfun ();
}
/* Callback for walk_gimple_seq. Check if a combined parallel
contains an OMP_FOR for which gimple_omp_for_combined_into_p is true. */
tree
omp_find_combined_for (gimple_stmt_iterator *gsi_p,
bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple *stmt = gsi_stmt (*gsi_p);
*handled_ops_p = true;
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
case GIMPLE_OMP_FOR:
if (gimple_omp_for_combined_into_p (stmt)
&& gimple_omp_for_kind (stmt)
== *(const enum gf_mask *) (wi->info))
{
wi->info = stmt;
return integer_zero_node;
}
break;
default:
break;
}
return NULL;
}
/* Add _LOOPTEMP_ clauses on OpenMP parallel or task. */
static void
add_taskreg_looptemp_clauses (enum gf_mask msk, gimple *stmt,
omp_context *outer_ctx)
{
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &msk;
walk_gimple_seq (gimple_omp_body (stmt), omp_find_combined_for, NULL, &wi);
if (wi.info != (void *) &msk)
{
gomp_for *for_stmt = as_a <gomp_for *> ((gimple *) wi.info);
struct omp_for_data fd;
omp_extract_for_data (for_stmt, &fd, NULL);
/* We need two temporaries with fd.loop.v type (istart/iend)
and then (fd.collapse - 1) temporaries with the same
type for count2 ... countN-1 vars if not constant. */
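/* E.g. for a combined "parallel for collapse(2)" with a non-constant n2,
count is 2 (istart/iend) + 1 (count2), plus one more if a lastprivate
clause is present. */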
size_t count = 2, i;
tree type = fd.iter_type;
if (fd.collapse > 1
&& TREE_CODE (fd.loop.n2) != INTEGER_CST)
{
count += fd.collapse - 1;
/* If there are lastprivate clauses on the inner
GIMPLE_OMP_FOR, add one more temporary for the total number
of iterations (product of count1 ... countN-1). */
if (omp_find_clause (gimple_omp_for_clauses (for_stmt),
OMP_CLAUSE_LASTPRIVATE))
count++;
else if (msk == GF_OMP_FOR_KIND_FOR
&& omp_find_clause (gimple_omp_parallel_clauses (stmt),
OMP_CLAUSE_LASTPRIVATE))
count++;
}
for (i = 0; i < count; i++)
{
tree temp = create_tmp_var (type);
tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
insert_decl_map (&outer_ctx->cb, temp, temp);
OMP_CLAUSE_DECL (c) = temp;
OMP_CLAUSE_CHAIN (c) = gimple_omp_taskreg_clauses (stmt);
gimple_omp_taskreg_set_clauses (stmt, c);
}
}
}
/* Scan an OpenMP parallel directive. */
static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
omp_context *ctx;
tree name;
gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));
/* Ignore parallel directives with empty bodies, unless there
are copyin clauses. */
if (optimize > 0
&& empty_body_p (gimple_omp_body (stmt))
&& omp_find_clause (gimple_omp_parallel_clauses (stmt),
OMP_CLAUSE_COPYIN) == NULL)
{
gsi_replace (gsi, gimple_build_nop (), false);
return;
}
if (gimple_omp_parallel_combined_p (stmt))
add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_FOR, stmt, outer_ctx);
ctx = new_omp_context (stmt, outer_ctx);
taskreg_contexts.safe_push (ctx);
if (taskreg_nesting_level > 1)
ctx->is_nested = true;
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
name = create_tmp_var_name (".omp_data_s");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->record_type);
DECL_ARTIFICIAL (name) = 1;
DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->record_type) = name;
TYPE_ARTIFICIAL (ctx->record_type) = 1;
if (!gimple_omp_parallel_grid_phony (stmt))
{
create_omp_child_function (ctx, false);
gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
}
scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
ctx->record_type = ctx->receiver_decl = NULL;
}
/* Scan an OpenMP task directive. */
static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
omp_context *ctx;
tree name, t;
gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));
/* Ignore task directives with empty bodies, unless they have a depend
clause. */
if (optimize > 0
&& empty_body_p (gimple_omp_body (stmt))
&& !omp_find_clause (gimple_omp_task_clauses (stmt), OMP_CLAUSE_DEPEND))
{
gsi_replace (gsi, gimple_build_nop (), false);
return;
}
if (gimple_omp_task_taskloop_p (stmt))
add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_TASKLOOP, stmt, outer_ctx);
ctx = new_omp_context (stmt, outer_ctx);
taskreg_contexts.safe_push (ctx);
if (taskreg_nesting_level > 1)
ctx->is_nested = true;
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
name = create_tmp_var_name (".omp_data_s");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->record_type);
DECL_ARTIFICIAL (name) = 1;
DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->record_type) = name;
TYPE_ARTIFICIAL (ctx->record_type) = 1;
create_omp_child_function (ctx, false);
gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
if (ctx->srecord_type)
{
name = create_tmp_var_name (".omp_data_a");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->srecord_type);
DECL_ARTIFICIAL (name) = 1;
DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->srecord_type) = name;
TYPE_ARTIFICIAL (ctx->srecord_type) = 1;
create_omp_child_function (ctx, true);
}
scan_omp (gimple_omp_body_ptr (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
{
ctx->record_type = ctx->receiver_decl = NULL;
t = build_int_cst (long_integer_type_node, 0);
gimple_omp_task_set_arg_size (stmt, t);
t = build_int_cst (long_integer_type_node, 1);
gimple_omp_task_set_arg_align (stmt, t);
}
}
/* Helper function for finish_taskreg_scan, called through walk_tree.
If maybe_lookup_decl_in_outer_ctx returns a different tree for some
variable, replace it in the expression. */
static tree
finish_taskreg_remap (tree *tp, int *walk_subtrees, void *data)
{
if (VAR_P (*tp))
{
omp_context *ctx = (omp_context *) data;
tree t = maybe_lookup_decl_in_outer_ctx (*tp, ctx);
if (t != *tp)
{
if (DECL_HAS_VALUE_EXPR_P (t))
t = unshare_expr (DECL_VALUE_EXPR (t));
*tp = t;
}
*walk_subtrees = 0;
}
else if (IS_TYPE_OR_DECL_P (*tp))
*walk_subtrees = 0;
return NULL_TREE;
}
/* If any decls have been made addressable during scan_omp,
adjust their fields if needed, and layout record types
of parallel/task constructs. */
static void
finish_taskreg_scan (omp_context *ctx)
{
if (ctx->record_type == NULL_TREE)
return;
/* If any task_shared_vars were needed, verify for all
OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
statements whether use_pointer_for_field has changed
because of that; if it did, update the field types now. */
if (task_shared_vars)
{
tree c;
for (c = gimple_omp_taskreg_clauses (ctx->stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
&& !OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
{
tree decl = OMP_CLAUSE_DECL (c);
/* Global variables don't need to be copied;
the receiver side will use them directly. */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
continue;
if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
|| !use_pointer_for_field (decl, ctx))
continue;
tree field = lookup_field (decl, ctx);
if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
&& TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
continue;
TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
TREE_THIS_VOLATILE (field) = 0;
DECL_USER_ALIGN (field) = 0;
SET_DECL_ALIGN (field, TYPE_ALIGN (TREE_TYPE (field)));
if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
SET_TYPE_ALIGN (ctx->record_type, DECL_ALIGN (field));
if (ctx->srecord_type)
{
tree sfield = lookup_sfield (decl, ctx);
TREE_TYPE (sfield) = TREE_TYPE (field);
TREE_THIS_VOLATILE (sfield) = 0;
DECL_USER_ALIGN (sfield) = 0;
SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
SET_TYPE_ALIGN (ctx->srecord_type, DECL_ALIGN (sfield));
}
}
}
if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
{
layout_type (ctx->record_type);
fixup_child_record_type (ctx);
}
else
{
location_t loc = gimple_location (ctx->stmt);
tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
/* Move VLA fields to the end. */
p = &TYPE_FIELDS (ctx->record_type);
while (*p)
if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
|| ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
{
*q = *p;
*p = TREE_CHAIN (*p);
TREE_CHAIN (*q) = NULL_TREE;
q = &TREE_CHAIN (*q);
}
else
p = &DECL_CHAIN (*p);
*p = vla_fields;
if (gimple_omp_task_taskloop_p (ctx->stmt))
{
/* Move the fields corresponding to the first and second _looptemp_
clauses to the front. These are filled by GOMP_taskloop
and thus need to be in specific positions. */
tree c1 = gimple_omp_task_clauses (ctx->stmt);
c1 = omp_find_clause (c1, OMP_CLAUSE__LOOPTEMP_);
tree c2 = omp_find_clause (OMP_CLAUSE_CHAIN (c1),
OMP_CLAUSE__LOOPTEMP_);
tree f1 = lookup_field (OMP_CLAUSE_DECL (c1), ctx);
tree f2 = lookup_field (OMP_CLAUSE_DECL (c2), ctx);
p = &TYPE_FIELDS (ctx->record_type);
while (*p)
if (*p == f1 || *p == f2)
*p = DECL_CHAIN (*p);
else
p = &DECL_CHAIN (*p);
DECL_CHAIN (f1) = f2;
DECL_CHAIN (f2) = TYPE_FIELDS (ctx->record_type);
TYPE_FIELDS (ctx->record_type) = f1;
if (ctx->srecord_type)
{
f1 = lookup_sfield (OMP_CLAUSE_DECL (c1), ctx);
f2 = lookup_sfield (OMP_CLAUSE_DECL (c2), ctx);
p = &TYPE_FIELDS (ctx->srecord_type);
while (*p)
if (*p == f1 || *p == f2)
*p = DECL_CHAIN (*p);
else
p = &DECL_CHAIN (*p);
DECL_CHAIN (f1) = f2;
DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type);
TYPE_FIELDS (ctx->srecord_type) = f1;
}
}
layout_type (ctx->record_type);
fixup_child_record_type (ctx);
if (ctx->srecord_type)
layout_type (ctx->srecord_type);
tree t = fold_convert_loc (loc, long_integer_type_node,
TYPE_SIZE_UNIT (ctx->record_type));
if (TREE_CODE (t) != INTEGER_CST)
{
t = unshare_expr (t);
walk_tree (&t, finish_taskreg_remap, ctx, NULL);
}
gimple_omp_task_set_arg_size (ctx->stmt, t);
t = build_int_cst (long_integer_type_node,
TYPE_ALIGN_UNIT (ctx->record_type));
gimple_omp_task_set_arg_align (ctx->stmt, t);
}
}
/* Find the enclosing offload context. */
static omp_context *
enclosing_target_ctx (omp_context *ctx)
{
for (; ctx; ctx = ctx->outer)
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET)
break;
return ctx;
}
/* Return true if ctx is part of an oacc kernels region. */
static bool
ctx_in_oacc_kernels_region (omp_context *ctx)
{
for (; ctx != NULL; ctx = ctx->outer)
{
gimple *stmt = ctx->stmt;
if (gimple_code (stmt) == GIMPLE_OMP_TARGET
&& gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
return true;
}
return false;
}
/* Check the parallelism clauses inside a kernels region.
Until kernels handling moves to use the same loop indirection
scheme as parallel, we need to do this checking early. */
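/* For example (illustrative source, not taken from a testcase):

#pragma acc kernels
{
#pragma acc loop gang
for (i = 0; i < n; i++)
{
#pragma acc loop gang
for (j = 0; j < n; j++)
...
}
}

Here the inner loop is diagnosed, because this_mask & outer_mask is
nonzero: both loops request gang parallelism. */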
static unsigned
check_oacc_kernel_gwv (gomp_for *stmt, omp_context *ctx)
{
bool checking = true;
unsigned outer_mask = 0;
unsigned this_mask = 0;
bool has_seq = false, has_auto = false;
if (ctx->outer)
outer_mask = check_oacc_kernel_gwv (NULL, ctx->outer);
if (!stmt)
{
checking = false;
if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR)
return outer_mask;
stmt = as_a <gomp_for *> (ctx->stmt);
}
for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
{
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_GANG:
this_mask |= GOMP_DIM_MASK (GOMP_DIM_GANG);
break;
case OMP_CLAUSE_WORKER:
this_mask |= GOMP_DIM_MASK (GOMP_DIM_WORKER);
break;
case OMP_CLAUSE_VECTOR:
this_mask |= GOMP_DIM_MASK (GOMP_DIM_VECTOR);
break;
case OMP_CLAUSE_SEQ:
has_seq = true;
break;
case OMP_CLAUSE_AUTO:
has_auto = true;
break;
default:
break;
}
}
if (checking)
{
if (has_seq && (this_mask || has_auto))
error_at (gimple_location (stmt), "%<seq%> overrides other"
" OpenACC loop specifiers");
else if (has_auto && this_mask)
error_at (gimple_location (stmt), "%<auto%> conflicts with other"
" OpenACC loop specifiers");
if (this_mask & outer_mask)
error_at (gimple_location (stmt), "inner loop uses same"
" OpenACC parallelism as containing loop");
}
return outer_mask | this_mask;
}
/* Scan a GIMPLE_OMP_FOR. */
static omp_context *
scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
{
omp_context *ctx;
size_t i;
tree clauses = gimple_omp_for_clauses (stmt);
ctx = new_omp_context (stmt, outer_ctx);
if (is_gimple_omp_oacc (stmt))
{
omp_context *tgt = enclosing_target_ctx (outer_ctx);
if (!tgt || is_oacc_parallel (tgt))
for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
char const *check = NULL;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_GANG:
check = "gang";
break;
case OMP_CLAUSE_WORKER:
check = "worker";
break;
case OMP_CLAUSE_VECTOR:
check = "vector";
break;
default:
break;
}
if (check && OMP_CLAUSE_OPERAND (c, 0))
error_at (gimple_location (stmt),
"argument not permitted on %qs clause in"
" OpenACC %<parallel%>", check);
}
if (tgt && is_oacc_kernels (tgt))
{
/* Strip out reductions, as they are not handled yet. */
tree *prev_ptr = &clauses;
while (tree probe = *prev_ptr)
{
tree *next_ptr = &OMP_CLAUSE_CHAIN (probe);
if (OMP_CLAUSE_CODE (probe) == OMP_CLAUSE_REDUCTION)
*prev_ptr = *next_ptr;
else
prev_ptr = next_ptr;
}
gimple_omp_for_set_clauses (stmt, clauses);
check_oacc_kernel_gwv (stmt, ctx);
}
}
scan_sharing_clauses (clauses, ctx);
scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
{
scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
}
scan_omp (gimple_omp_body_ptr (stmt), ctx);
return ctx;
}
/* Duplicate #pragma omp simd, creating one copy for SIMT and another for SIMD. */
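/* In outline, the replacement looks like this (a pseudo-GIMPLE sketch,
not the exact IL):

cond = IFN_GOMP_USE_SIMT ();
if (cond != 0) goto lab1; else goto lab2;
lab1: <copy of the loop with an added _simt_ clause>; goto lab3;
lab2: <the original simd loop>;
lab3: ;

Both copies are then scanned, and the context of the SIMD copy records
the SIMT copy in its simt_stmt field. */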
static void
scan_omp_simd (gimple_stmt_iterator *gsi, gomp_for *stmt,
omp_context *outer_ctx)
{
gbind *bind = gimple_build_bind (NULL, NULL, NULL);
gsi_replace (gsi, bind, false);
gimple_seq seq = NULL;
gimple *g = gimple_build_call_internal (IFN_GOMP_USE_SIMT, 0);
tree cond = create_tmp_var_raw (integer_type_node);
DECL_CONTEXT (cond) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (cond) = 1;
gimple_bind_set_vars (bind, cond);
gimple_call_set_lhs (g, cond);
gimple_seq_add_stmt (&seq, g);
tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
tree lab2 = create_artificial_label (UNKNOWN_LOCATION);
tree lab3 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, cond, integer_zero_node, lab1, lab2);
gimple_seq_add_stmt (&seq, g);
g = gimple_build_label (lab1);
gimple_seq_add_stmt (&seq, g);
gimple_seq new_seq = copy_gimple_seq_and_replace_locals (stmt);
gomp_for *new_stmt = as_a <gomp_for *> (new_seq);
tree clause = build_omp_clause (gimple_location (stmt), OMP_CLAUSE__SIMT_);
OMP_CLAUSE_CHAIN (clause) = gimple_omp_for_clauses (new_stmt);
gimple_omp_for_set_clauses (new_stmt, clause);
gimple_seq_add_stmt (&seq, new_stmt);
g = gimple_build_goto (lab3);
gimple_seq_add_stmt (&seq, g);
g = gimple_build_label (lab2);
gimple_seq_add_stmt (&seq, g);
gimple_seq_add_stmt (&seq, stmt);
g = gimple_build_label (lab3);
gimple_seq_add_stmt (&seq, g);
gimple_bind_set_body (bind, seq);
update_stmt (bind);
scan_omp_for (new_stmt, outer_ctx);
scan_omp_for (stmt, outer_ctx)->simt_stmt = new_stmt;
}
/* Scan an OpenMP sections directive. */
static void
scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
{
omp_context *ctx;
ctx = new_omp_context (stmt, outer_ctx);
scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
}
/* Scan an OpenMP single directive. */
static void
scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
{
omp_context *ctx;
tree name;
ctx = new_omp_context (stmt, outer_ctx);
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
name = create_tmp_var_name (".omp_copy_s");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->record_type);
TYPE_NAME (ctx->record_type) = name;
scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
ctx->record_type = NULL;
else
layout_type (ctx->record_type);
}
/* Return true if the CLAUSES of an omp target guarantee that the base pointers
used in the corresponding offloaded function are restrict. */
static bool
omp_target_base_pointers_restrict_p (tree clauses)
{
/* The analysis relies on the GOMP_MAP_FORCE_* mapping kinds, which are only
used by OpenACC. */
if (flag_openacc == 0)
return false;
/* I. Basic example:
void foo (void)
{
unsigned int a[2], b[2];
#pragma acc kernels \
copyout (a) \
copyout (b)
{
a[0] = 0;
b[0] = 1;
}
}
After gimplification, we have:
#pragma omp target oacc_kernels \
map(force_from:a [len: 8]) \
map(force_from:b [len: 8])
{
a[0] = 0;
b[0] = 1;
}
Because both mappings have the force prefix, we know that they will be
allocated when calling the corresponding offloaded function, which means we
can mark the base pointers for a and b in the offloaded function as
restrict. */
tree c;
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
return false;
switch (OMP_CLAUSE_MAP_KIND (c))
{
case GOMP_MAP_FORCE_ALLOC:
case GOMP_MAP_FORCE_TO:
case GOMP_MAP_FORCE_FROM:
case GOMP_MAP_FORCE_TOFROM:
break;
default:
return false;
}
}
return true;
}
/* Scan a GIMPLE_OMP_TARGET. */
static void
scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
{
omp_context *ctx;
tree name;
bool offloaded = is_gimple_omp_offloaded (stmt);
tree clauses = gimple_omp_target_clauses (stmt);
ctx = new_omp_context (stmt, outer_ctx);
ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
name = create_tmp_var_name (".omp_data_t");
name = build_decl (gimple_location (stmt),
TYPE_DECL, name, ctx->record_type);
DECL_ARTIFICIAL (name) = 1;
DECL_NAMELESS (name) = 1;
TYPE_NAME (ctx->record_type) = name;
TYPE_ARTIFICIAL (ctx->record_type) = 1;
bool base_pointers_restrict = false;
if (offloaded)
{
create_omp_child_function (ctx, false);
gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
base_pointers_restrict = omp_target_base_pointers_restrict_p (clauses);
if (base_pointers_restrict
&& dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
"Base pointers in offloaded function are restrict\n");
}
scan_sharing_clauses (clauses, ctx, base_pointers_restrict);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
if (TYPE_FIELDS (ctx->record_type) == NULL)
ctx->record_type = ctx->receiver_decl = NULL;
else
{
TYPE_FIELDS (ctx->record_type)
= nreverse (TYPE_FIELDS (ctx->record_type));
if (flag_checking)
{
unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
for (tree field = TYPE_FIELDS (ctx->record_type);
field;
field = DECL_CHAIN (field))
gcc_assert (DECL_ALIGN (field) == align);
}
layout_type (ctx->record_type);
if (offloaded)
fixup_child_record_type (ctx);
}
}
/* Scan an OpenMP teams directive. */
static void
scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
{
omp_context *ctx = new_omp_context (stmt, outer_ctx);
scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
}
/* Check nesting restrictions. */
static bool
check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
{
tree c;
if (ctx && gimple_code (ctx->stmt) == GIMPLE_OMP_GRID_BODY)
/* GRID_BODY is an artificial construct; nesting rules will be checked in
the original copy of its contents. */
return true;
/* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
inside an OpenACC CTX. */
if (!(is_gimple_omp (stmt)
&& is_gimple_omp_oacc (stmt))
/* Except for atomic codes that we share with OpenMP. */
&& !(gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
|| gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE))
{
if (oacc_get_fn_attrib (cfun->decl) != NULL)
{
error_at (gimple_location (stmt),
"non-OpenACC construct inside of OpenACC routine");
return false;
}
else
for (omp_context *octx = ctx; octx != NULL; octx = octx->outer)
if (is_gimple_omp (octx->stmt)
&& is_gimple_omp_oacc (octx->stmt))
{
error_at (gimple_location (stmt),
"non-OpenACC construct inside of OpenACC region");
return false;
}
}
if (ctx != NULL)
{
if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
{
c = NULL_TREE;
if (gimple_code (stmt) == GIMPLE_OMP_ORDERED)
{
c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
if (omp_find_clause (c, OMP_CLAUSE_SIMD))
{
if (omp_find_clause (c, OMP_CLAUSE_THREADS)
&& (ctx->outer == NULL
|| !gimple_omp_for_combined_into_p (ctx->stmt)
|| gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR
|| (gimple_omp_for_kind (ctx->outer->stmt)
!= GF_OMP_FOR_KIND_FOR)
|| !gimple_omp_for_combined_p (ctx->outer->stmt)))
{
error_at (gimple_location (stmt),
"%<ordered simd threads%> must be closely "
"nested inside of %<for simd%> region");
return false;
}
return true;
}
}
error_at (gimple_location (stmt),
"OpenMP constructs other than %<#pragma omp ordered simd%>"
" may not be nested inside %<simd%> region");
return false;
}
else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
{
if ((gimple_code (stmt) != GIMPLE_OMP_FOR
|| ((gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_DISTRIBUTE)
&& (gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_GRID_LOOP)))
&& gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
{
error_at (gimple_location (stmt),
"only %<distribute%> or %<parallel%> regions are "
"allowed to be strictly nested inside %<teams%> "
"region");
return false;
}
}
}
switch (gimple_code (stmt))
{
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
return true;
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
{
if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
{
error_at (gimple_location (stmt),
"%<distribute%> region must be strictly nested "
"inside %<teams%> construct");
return false;
}
return true;
}
/* We split a taskloop into a task with a nested taskloop inside it. */
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP)
return true;
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
{
bool ok = false;
if (ctx)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_FOR:
ok = (gimple_omp_for_kind (ctx->stmt)
== GF_OMP_FOR_KIND_OACC_LOOP);
break;
case GIMPLE_OMP_TARGET:
switch (gimple_omp_target_kind (ctx->stmt))
{
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
case GF_OMP_TARGET_KIND_OACC_KERNELS:
ok = true;
break;
default:
break;
}
default:
break;
}
else if (oacc_get_fn_attrib (current_function_decl))
ok = true;
if (!ok)
{
error_at (gimple_location (stmt),
"OpenACC loop directive must be associated with"
" an OpenACC compute region");
return false;
}
}
/* FALLTHRU */
case GIMPLE_CALL:
if (is_gimple_call (stmt)
&& (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
|| DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCELLATION_POINT))
{
const char *bad = NULL;
const char *kind = NULL;
const char *construct
= (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL)
? "#pragma omp cancel"
: "#pragma omp cancellation point";
if (ctx == NULL)
{
error_at (gimple_location (stmt), "orphaned %qs construct",
construct);
return false;
}
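/* The first call argument encodes which construct is being cancelled;
the case values below (1 = parallel, 2 = for, 4 = sections,
8 = taskgroup) match the GOMP_cancel ABI as far as I can tell. */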
switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
? tree_to_shwi (gimple_call_arg (stmt, 0))
: 0)
{
case 1:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
bad = "#pragma omp parallel";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
ctx->cancellable = true;
kind = "parallel";
break;
case 2:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
bad = "#pragma omp for";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
{
ctx->cancellable = true;
if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
"%<#pragma omp cancel for%> inside "
"%<nowait%> for construct");
if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED))
warning_at (gimple_location (stmt), 0,
"%<#pragma omp cancel for%> inside "
"%<ordered%> for construct");
}
kind = "for";
break;
case 4:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
&& gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
bad = "#pragma omp sections";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
{
if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
{
ctx->cancellable = true;
if (omp_find_clause (gimple_omp_sections_clauses
(ctx->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
"%<#pragma omp cancel sections%> inside "
"%<nowait%> sections construct");
}
else
{
gcc_assert (ctx->outer
&& gimple_code (ctx->outer->stmt)
== GIMPLE_OMP_SECTIONS);
ctx->outer->cancellable = true;
if (omp_find_clause (gimple_omp_sections_clauses
(ctx->outer->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
"%<#pragma omp cancel sections%> inside "
"%<nowait%> sections construct");
}
}
kind = "sections";
break;
case 8:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
bad = "#pragma omp task";
else
{
for (omp_context *octx = ctx->outer;
octx; octx = octx->outer)
{
switch (gimple_code (octx->stmt))
{
case GIMPLE_OMP_TASKGROUP:
break;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (octx->stmt)
!= GF_OMP_TARGET_KIND_REGION)
continue;
/* FALLTHRU */
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
error_at (gimple_location (stmt),
"%<%s taskgroup%> construct not closely "
"nested inside of %<taskgroup%> region",
construct);
return false;
default:
continue;
}
break;
}
ctx->cancellable = true;
}
kind = "taskgroup";
break;
default:
error_at (gimple_location (stmt), "invalid arguments");
return false;
}
if (bad)
{
error_at (gimple_location (stmt),
"%<%s %s%> construct not closely nested inside of %qs",
construct, kind, bad);
return false;
}
}
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
&& gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
break;
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_CRITICAL:
if (is_gimple_call (stmt))
{
if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
!= BUILT_IN_GOMP_BARRIER)
return true;
error_at (gimple_location (stmt),
"barrier region may not be closely nested inside "
"of work-sharing, %<critical%>, %<ordered%>, "
"%<master%>, explicit %<task%> or %<taskloop%> "
"region");
return false;
}
error_at (gimple_location (stmt),
"work-sharing region may not be closely nested inside "
"of work-sharing, %<critical%>, %<ordered%>, "
"%<master%>, explicit %<task%> or %<taskloop%> region");
return false;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
return true;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (ctx->stmt)
== GF_OMP_TARGET_KIND_REGION)
return true;
break;
default:
break;
}
break;
case GIMPLE_OMP_MASTER:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
&& gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
break;
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_TASK:
error_at (gimple_location (stmt),
"%<master%> region may not be closely nested inside "
"of work-sharing, explicit %<task%> or %<taskloop%> "
"region");
return false;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
return true;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (ctx->stmt)
== GF_OMP_TARGET_KIND_REGION)
return true;
break;
default:
break;
}
break;
case GIMPLE_OMP_TASK:
for (c = gimple_omp_task_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
{
enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
error_at (OMP_CLAUSE_LOCATION (c),
"%<depend(%s)%> is only allowed in %<omp ordered%>",
kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
return false;
}
break;
case GIMPLE_OMP_ORDERED:
for (c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
c; c = OMP_CLAUSE_CHAIN (c))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
{
gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREADS
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SIMD);
continue;
}
enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
if (kind == OMP_CLAUSE_DEPEND_SOURCE
|| kind == OMP_CLAUSE_DEPEND_SINK)
{
tree oclause;
/* Look for containing ordered(N) loop. */
if (ctx == NULL
|| gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| (oclause
= omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED)) == NULL_TREE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<ordered%> construct with %<depend%> clause "
"must be closely nested inside an %<ordered%> "
"loop");
return false;
}
else if (OMP_CLAUSE_ORDERED_EXPR (oclause) == NULL_TREE)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%<ordered%> construct with %<depend%> clause "
"must be closely nested inside a loop with "
"%<ordered%> clause with a parameter");
return false;
}
}
else
{
error_at (OMP_CLAUSE_LOCATION (c),
"invalid depend kind in omp %<ordered%> %<depend%>");
return false;
}
}
c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
if (omp_find_clause (c, OMP_CLAUSE_SIMD))
{
/* ordered simd must be closely nested inside of a simd region, and
a simd region must not encounter constructs other than ordered simd;
therefore ordered simd may be either orphaned, or ctx->stmt must be
simd. The latter case has already been handled above. */
if (ctx != NULL)
{
error_at (gimple_location (stmt),
"%<ordered%> %<simd%> must be closely nested inside "
"%<simd%> region");
return false;
}
}
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_ORDERED:
ordered_in_taskloop:
error_at (gimple_location (stmt),
"%<ordered%> region may not be closely nested inside "
"of %<critical%>, %<ordered%>, explicit %<task%> or "
"%<taskloop%> region");
return false;
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP)
goto ordered_in_taskloop;
tree o;
o = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED);
if (o == NULL)
{
error_at (gimple_location (stmt),
"%<ordered%> region must be closely nested inside "
"a loop region with an %<ordered%> clause");
return false;
}
if (OMP_CLAUSE_ORDERED_EXPR (o) != NULL_TREE
&& omp_find_clause (c, OMP_CLAUSE_DEPEND) == NULL_TREE)
{
error_at (gimple_location (stmt),
"%<ordered%> region without %<depend%> clause may "
"not be closely nested inside a loop region with "
"an %<ordered%> clause with a parameter");
return false;
}
return true;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (ctx->stmt)
!= GF_OMP_TARGET_KIND_REGION)
break;
/* FALLTHRU */
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
error_at (gimple_location (stmt),
"%<ordered%> region must be closely nested inside "
"a loop region with an %<ordered%> clause");
return false;
default:
break;
}
break;
case GIMPLE_OMP_CRITICAL:
{
tree this_stmt_name
= gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
for (; ctx != NULL; ctx = ctx->outer)
if (gomp_critical *other_crit
= dyn_cast <gomp_critical *> (ctx->stmt))
if (this_stmt_name == gimple_omp_critical_name (other_crit))
{
error_at (gimple_location (stmt),
"%<critical%> region may not be nested inside "
"a %<critical%> region with the same name");
return false;
}
}
break;
case GIMPLE_OMP_TEAMS:
if (ctx == NULL
|| gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
|| gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
{
error_at (gimple_location (stmt),
"%<teams%> construct not closely nested inside of "
"%<target%> construct");
return false;
}
break;
case GIMPLE_OMP_TARGET:
for (c = gimple_omp_target_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
{
enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
error_at (OMP_CLAUSE_LOCATION (c),
"%<depend(%s)%> is only allowed in %<omp ordered%>",
kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
return false;
}
if (is_gimple_omp_offloaded (stmt)
&& oacc_get_fn_attrib (cfun->decl) != NULL)
{
error_at (gimple_location (stmt),
"OpenACC region inside of OpenACC routine, nested "
"parallelism not supported yet");
return false;
}
for (; ctx != NULL; ctx = ctx->outer)
{
if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
{
if (is_gimple_omp (stmt)
&& is_gimple_omp_oacc (stmt)
&& is_gimple_omp (ctx->stmt))
{
error_at (gimple_location (stmt),
"OpenACC construct inside of non-OpenACC region");
return false;
}
continue;
}
const char *stmt_name, *ctx_stmt_name;
switch (gimple_omp_target_kind (stmt))
{
case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break;
case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break;
case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break;
case GF_OMP_TARGET_KIND_ENTER_DATA:
stmt_name = "target enter data"; break;
case GF_OMP_TARGET_KIND_EXIT_DATA:
stmt_name = "target exit data"; break;
case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break;
case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break;
case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break;
case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break;
case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
stmt_name = "enter/exit data"; break;
case GF_OMP_TARGET_KIND_OACC_HOST_DATA: stmt_name = "host_data";
break;
default: gcc_unreachable ();
}
switch (gimple_omp_target_kind (ctx->stmt))
{
case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break;
case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break;
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
ctx_stmt_name = "parallel"; break;
case GF_OMP_TARGET_KIND_OACC_KERNELS:
ctx_stmt_name = "kernels"; break;
case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break;
case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
ctx_stmt_name = "host_data"; break;
default: gcc_unreachable ();
}
/* OpenACC/OpenMP mismatch? */
if (is_gimple_omp_oacc (stmt)
!= is_gimple_omp_oacc (ctx->stmt))
{
error_at (gimple_location (stmt),
"%s %qs construct inside of %s %qs region",
(is_gimple_omp_oacc (stmt)
? "OpenACC" : "OpenMP"), stmt_name,
(is_gimple_omp_oacc (ctx->stmt)
? "OpenACC" : "OpenMP"), ctx_stmt_name);
return false;
}
if (is_gimple_omp_offloaded (ctx->stmt))
{
/* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */
if (is_gimple_omp_oacc (ctx->stmt))
{
error_at (gimple_location (stmt),
"%qs construct inside of %qs region",
stmt_name, ctx_stmt_name);
return false;
}
else
{
warning_at (gimple_location (stmt), 0,
"%qs construct inside of %qs region",
stmt_name, ctx_stmt_name);
}
}
}
break;
default:
break;
}
return true;
}
/* Helper function for scan_omp.
Callback for walk_tree or operators in walk_gimple_stmt used to
scan for OMP directives in TP. */
static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
omp_context *ctx = (omp_context *) wi->info;
tree t = *tp;
switch (TREE_CODE (t))
{
case VAR_DECL:
case PARM_DECL:
case LABEL_DECL:
case RESULT_DECL:
if (ctx)
{
tree repl = remap_decl (t, &ctx->cb);
gcc_checking_assert (TREE_CODE (repl) != ERROR_MARK);
*tp = repl;
}
break;
default:
if (ctx && TYPE_P (t))
*tp = remap_type (t, &ctx->cb);
else if (!DECL_P (t))
{
*walk_subtrees = 1;
if (ctx)
{
tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
if (tem != TREE_TYPE (t))
{
if (TREE_CODE (t) == INTEGER_CST)
*tp = wide_int_to_tree (tem, wi::to_wide (t));
else
TREE_TYPE (t) = tem;
}
}
}
break;
}
return NULL_TREE;
}
/* Return true if FNDECL is a setjmp or a longjmp. */
static bool
setjmp_or_longjmp_p (const_tree fndecl)
{
if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
&& (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
|| DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
return true;
tree declname = DECL_NAME (fndecl);
if (!declname)
return false;
const char *name = IDENTIFIER_POINTER (declname);
return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
}
/* Helper function for scan_omp.
Callback for walk_gimple_stmt used to scan for OMP directives in
the current statement in GSI. */
static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple *stmt = gsi_stmt (*gsi);
omp_context *ctx = (omp_context *) wi->info;
if (gimple_has_location (stmt))
input_location = gimple_location (stmt);
/* Check the nesting restrictions. */
bool remove = false;
if (is_gimple_omp (stmt))
remove = !check_omp_nesting_restrictions (stmt, ctx);
else if (is_gimple_call (stmt))
{
tree fndecl = gimple_call_fndecl (stmt);
if (fndecl)
{
if (setjmp_or_longjmp_p (fndecl)
&& ctx
&& gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
{
remove = true;
error_at (gimple_location (stmt),
"setjmp/longjmp inside simd construct");
}
else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_GOMP_BARRIER:
case BUILT_IN_GOMP_CANCEL:
case BUILT_IN_GOMP_CANCELLATION_POINT:
case BUILT_IN_GOMP_TASKYIELD:
case BUILT_IN_GOMP_TASKWAIT:
case BUILT_IN_GOMP_TASKGROUP_START:
case BUILT_IN_GOMP_TASKGROUP_END:
remove = !check_omp_nesting_restrictions (stmt, ctx);
break;
default:
break;
}
}
}
if (remove)
{
stmt = gimple_build_nop ();
gsi_replace (gsi, stmt, false);
}
*handled_ops_p = true;
switch (gimple_code (stmt))
{
case GIMPLE_OMP_PARALLEL:
taskreg_nesting_level++;
scan_omp_parallel (gsi, ctx);
taskreg_nesting_level--;
break;
case GIMPLE_OMP_TASK:
taskreg_nesting_level++;
scan_omp_task (gsi, ctx);
taskreg_nesting_level--;
break;
case GIMPLE_OMP_FOR:
if (((gimple_omp_for_kind (as_a <gomp_for *> (stmt))
& GF_OMP_FOR_KIND_MASK) == GF_OMP_FOR_KIND_SIMD)
&& omp_maybe_offloaded_ctx (ctx)
&& omp_max_simt_vf ())
scan_omp_simd (gsi, as_a <gomp_for *> (stmt), ctx);
else
scan_omp_for (as_a <gomp_for *> (stmt), ctx);
break;
case GIMPLE_OMP_SECTIONS:
scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
break;
case GIMPLE_OMP_SINGLE:
scan_omp_single (as_a <gomp_single *> (stmt), ctx);
break;
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_GRID_BODY:
ctx = new_omp_context (stmt, ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
break;
case GIMPLE_OMP_TARGET:
scan_omp_target (as_a <gomp_target *> (stmt), ctx);
break;
case GIMPLE_OMP_TEAMS:
scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
break;
case GIMPLE_BIND:
{
tree var;
*handled_ops_p = false;
if (ctx)
for (var = gimple_bind_vars (as_a <gbind *> (stmt));
var ;
var = DECL_CHAIN (var))
insert_decl_map (&ctx->cb, var, var);
}
break;
default:
*handled_ops_p = false;
break;
}
return NULL_TREE;
}
/* Scan all the statements starting at the current statement. CTX
contains context information about the OMP directives and
clauses found during the scan. */
static void
scan_omp (gimple_seq *body_p, omp_context *ctx)
{
location_t saved_location;
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.info = ctx;
wi.want_locations = true;
saved_location = input_location;
walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
input_location = saved_location;
}
/* Re-gimplification and code generation routines. */
/* Remove omp_member_access_dummy_var variables from gimple_bind_vars
of BIND if in a method. */
static void
maybe_remove_omp_member_access_dummy_vars (gbind *bind)
{
if (DECL_ARGUMENTS (current_function_decl)
&& DECL_ARTIFICIAL (DECL_ARGUMENTS (current_function_decl))
&& (TREE_CODE (TREE_TYPE (DECL_ARGUMENTS (current_function_decl)))
== POINTER_TYPE))
{
tree vars = gimple_bind_vars (bind);
for (tree *pvar = &vars; *pvar; )
if (omp_member_access_dummy_var (*pvar))
*pvar = DECL_CHAIN (*pvar);
else
pvar = &DECL_CHAIN (*pvar);
gimple_bind_set_vars (bind, vars);
}
}
/* Remove omp_member_access_dummy_var variables from BLOCK_VARS of
block and its subblocks. */
static void
remove_member_access_dummy_vars (tree block)
{
for (tree *pvar = &BLOCK_VARS (block); *pvar; )
if (omp_member_access_dummy_var (*pvar))
*pvar = DECL_CHAIN (*pvar);
else
pvar = &DECL_CHAIN (*pvar);
for (block = BLOCK_SUBBLOCKS (block); block; block = BLOCK_CHAIN (block))
remove_member_access_dummy_vars (block);
}
/* If a context was created for STMT when it was scanned, return it. */
static omp_context *
maybe_lookup_ctx (gimple *stmt)
{
splay_tree_node n;
n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
context that has a mapping for DECL.
If CTX is a nested parallel directive, we may have to use the decl
mappings created in CTX's parent context. Suppose that we have the
following parallel nesting (variable UIDs shown for clarity):
iD.1562 = 0;
#omp parallel shared(iD.1562) -> outer parallel
iD.1562 = iD.1562 + 1;
#omp parallel shared (iD.1562) -> inner parallel
iD.1562 = iD.1562 - 1;
Each parallel structure will create a distinct .omp_data_s structure
for copying iD.1562 in/out of the directive:
outer parallel .omp_data_s.1.i -> iD.1562
inner parallel .omp_data_s.2.i -> iD.1562
A shared variable mapping will produce a copy-out operation before
the parallel directive and a copy-in operation after it. So, in
this case we would have:
iD.1562 = 0;
.omp_data_o.1.i = iD.1562;
#omp parallel shared(iD.1562) -> outer parallel
.omp_data_i.1 = &.omp_data_o.1
.omp_data_i.1->i = .omp_data_i.1->i + 1;
.omp_data_o.2.i = iD.1562; -> **
#omp parallel shared(iD.1562) -> inner parallel
.omp_data_i.2 = &.omp_data_o.2
.omp_data_i.2->i = .omp_data_i.2->i - 1;
** This is a problem. The symbol iD.1562 cannot be referenced
inside the body of the outer parallel region. But since we are
emitting this copy operation while expanding the inner parallel
directive, we need to access the CTX structure of the outer
parallel directive to get the correct mapping:
.omp_data_o.2.i = .omp_data_i.1->i
Since there may be other workshare or parallel directives enclosing
the parallel directive, it may be necessary to walk up the context
parent chain. This is not a problem in general because nested
parallelism happens only rarely. */
static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
tree t;
omp_context *up;
for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
t = maybe_lookup_decl (decl, up);
gcc_assert (!ctx->is_nested || t || is_global_var (decl));
return t ? t : decl;
}
/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
in outer contexts. */
static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
tree t = NULL;
omp_context *up;
for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
t = maybe_lookup_decl (decl, up);
return t ? t : decl;
}
/* Construct the initialization value for reduction operation OP. */
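/* A summary of the identity values produced below (illustrative):
+, -, |, ^, ||, != -> 0
*, &&, == -> 1
& -> ~0 (all bits set)
max -> -inf if infinities are honored, else the minimum of TYPE
min -> +inf if infinities are honored, else the maximum of TYPE. */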
tree
omp_reduction_init_op (location_t loc, enum tree_code op, tree type)
{
switch (op)
{
case PLUS_EXPR:
case MINUS_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_XOR_EXPR:
case NE_EXPR:
return build_zero_cst (type);
case MULT_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_ANDIF_EXPR:
case EQ_EXPR:
return fold_convert_loc (loc, type, integer_one_node);
case BIT_AND_EXPR:
return fold_convert_loc (loc, type, integer_minus_one_node);
case MAX_EXPR:
if (SCALAR_FLOAT_TYPE_P (type))
{
REAL_VALUE_TYPE max, min;
if (HONOR_INFINITIES (type))
{
real_inf (&max);
real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
}
else
real_maxval (&min, 1, TYPE_MODE (type));
return build_real (type, min);
}
else if (POINTER_TYPE_P (type))
{
wide_int min
= wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
return wide_int_to_tree (type, min);
}
else
{
gcc_assert (INTEGRAL_TYPE_P (type));
return TYPE_MIN_VALUE (type);
}
case MIN_EXPR:
if (SCALAR_FLOAT_TYPE_P (type))
{
REAL_VALUE_TYPE max;
if (HONOR_INFINITIES (type))
real_inf (&max);
else
real_maxval (&max, 0, TYPE_MODE (type));
return build_real (type, max);
}
else if (POINTER_TYPE_P (type))
{
wide_int max
= wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
return wide_int_to_tree (type, max);
}
else
{
gcc_assert (INTEGRAL_TYPE_P (type));
return TYPE_MAX_VALUE (type);
}
default:
gcc_unreachable ();
}
}
/* Construct the initialization value for reduction CLAUSE. */
tree
omp_reduction_init (tree clause, tree type)
{
return omp_reduction_init_op (OMP_CLAUSE_LOCATION (clause),
OMP_CLAUSE_REDUCTION_CODE (clause), type);
}
/* Return alignment to be assumed for var in CLAUSE, which should be
OMP_CLAUSE_ALIGNED. */
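/* For example (an assumption about a typical target, not a guarantee):
on a target whose widest preferred SIMD mode is 32 bytes wide, the
loop below would return 32. */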
static tree
omp_clause_aligned_alignment (tree clause)
{
if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
/* Otherwise return the implementation-defined alignment. */
unsigned int al = 1;
opt_scalar_mode mode_iter;
auto_vector_sizes sizes;
targetm.vectorize.autovectorize_vector_sizes (&sizes);
poly_uint64 vs = 0;
for (unsigned int i = 0; i < sizes.length (); ++i)
vs = ordered_max (vs, sizes[i]);
static enum mode_class classes[]
= { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
for (int i = 0; i < 4; i += 2)
/* The for loop above dictates that we only walk through scalar classes. */
FOR_EACH_MODE_IN_CLASS (mode_iter, classes[i])
{
scalar_mode mode = mode_iter.require ();
machine_mode vmode = targetm.vectorize.preferred_simd_mode (mode);
if (GET_MODE_CLASS (vmode) != classes[i + 1])
continue;
while (maybe_ne (vs, 0U)
&& known_lt (GET_MODE_SIZE (vmode), vs)
&& GET_MODE_2XWIDER_MODE (vmode).exists ())
vmode = GET_MODE_2XWIDER_MODE (vmode).require ();
tree type = lang_hooks.types.type_for_mode (mode, 1);
if (type == NULL_TREE || TYPE_MODE (type) != mode)
continue;
poly_uint64 nelts = exact_div (GET_MODE_SIZE (vmode),
GET_MODE_SIZE (mode));
type = build_vector_type (type, nelts);
if (TYPE_MODE (type) != vmode)
continue;
if (TYPE_ALIGN_UNIT (type) > al)
al = TYPE_ALIGN_UNIT (type);
}
return build_int_cst (integer_type_node, al);
}
/* This structure is part of the interface between lower_rec_simd_input_clauses
and lower_rec_input_clauses. */
struct omplow_simd_context {
omplow_simd_context () { memset (this, 0, sizeof (*this)); }
tree idx;
tree lane;
vec<tree, va_heap> simt_eargs;
gimple_seq simt_dlist;
poly_uint64_pod max_vf;
bool is_simt;
};
/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
privatization. */
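/* A sketch of the non-SIMT case: NEW_VAR becomes backed by an
"omp simd array" D.tmp[max_vf]; IVAR is set to D.tmp[sctx->idx] (the
per-iteration element used inside the vectorized body), LVAR is set to
D.tmp[sctx->lane] (the element in which the scalar value lives), and
NEW_VAR's DECL_VALUE_EXPR is pointed at LVAR. */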
static bool
lower_rec_simd_input_clauses (tree new_var, omp_context *ctx,
omplow_simd_context *sctx, tree &ivar, tree &lvar)
{
if (known_eq (sctx->max_vf, 0U))
{
sctx->max_vf = sctx->is_simt ? omp_max_simt_vf () : omp_max_vf ();
if (maybe_gt (sctx->max_vf, 1U))
{
tree c = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_SAFELEN);
if (c)
{
poly_uint64 safe_len;
if (!poly_int_tree_p (OMP_CLAUSE_SAFELEN_EXPR (c), &safe_len)
|| maybe_lt (safe_len, 1U))
sctx->max_vf = 1;
else
sctx->max_vf = lower_bound (sctx->max_vf, safe_len);
}
}
if (maybe_gt (sctx->max_vf, 1U))
{
sctx->idx = create_tmp_var (unsigned_type_node);
sctx->lane = create_tmp_var (unsigned_type_node);
}
}
if (known_eq (sctx->max_vf, 1U))
return false;
if (sctx->is_simt)
{
if (is_gimple_reg (new_var))
{
ivar = lvar = new_var;
return true;
}
tree type = TREE_TYPE (new_var), ptype = build_pointer_type (type);
ivar = lvar = create_tmp_var (type);
TREE_ADDRESSABLE (ivar) = 1;
DECL_ATTRIBUTES (ivar) = tree_cons (get_identifier ("omp simt private"),
NULL, DECL_ATTRIBUTES (ivar));
sctx->simt_eargs.safe_push (build1 (ADDR_EXPR, ptype, ivar));
tree clobber = build_constructor (type, NULL);
TREE_THIS_VOLATILE (clobber) = 1;
gimple *g = gimple_build_assign (ivar, clobber);
gimple_seq_add_stmt (&sctx->simt_dlist, g);
}
else
{
tree atype = build_array_type_nelts (TREE_TYPE (new_var), sctx->max_vf);
tree avar = create_tmp_var_raw (atype);
if (TREE_ADDRESSABLE (new_var))
TREE_ADDRESSABLE (avar) = 1;
DECL_ATTRIBUTES (avar)
= tree_cons (get_identifier ("omp simd array"), NULL,
DECL_ATTRIBUTES (avar));
gimple_add_tmp_var (avar);
ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, sctx->idx,
NULL_TREE, NULL_TREE);
lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, sctx->lane,
NULL_TREE, NULL_TREE);
}
if (DECL_P (new_var))
{
SET_DECL_VALUE_EXPR (new_var, lvar);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
return true;
}
/* Helper function of lower_rec_input_clauses. For a reference
in a simd reduction, add an underlying variable that it will reference. */
static void
handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
{
tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
if (TREE_CONSTANT (z))
{
z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)),
get_name (new_vard));
gimple_add_tmp_var (z);
TREE_ADDRESSABLE (z) = 1;
z = build_fold_addr_expr_loc (loc, z);
gimplify_assign (new_vard, z, ilist);
}
}
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
from the receiver (aka child) side and initializers for REFERENCE_TYPE
private variables. Initialization statements go in ILIST, while calls
to destructors go in DLIST. */
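/* For orientation, a hedged sketch for firstprivate (x) reduction (+:r)
on a parallel: ILIST receives roughly
x' = .omp_data_i->x; (or a copy-constructor call)
r' = 0; (the identity from omp_reduction_init)
while DLIST collects the matching destructor calls run at region exit. */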
static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
omp_context *ctx, struct omp_for_data *fd)
{
tree c, dtor, copyin_seq, x, ptr;
bool copyin_by_ref = false;
bool lastprivate_firstprivate = false;
bool reduction_omp_orig_ref = false;
int pass;
bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
omplow_simd_context sctx = omplow_simd_context ();
tree simt_lane = NULL_TREE, simtrec = NULL_TREE;
tree ivar = NULL_TREE, lvar = NULL_TREE, uid = NULL_TREE;
gimple_seq llist[3] = { };
copyin_seq = NULL;
sctx.is_simt = is_simd && omp_find_clause (clauses, OMP_CLAUSE__SIMT_);
/* Set max_vf=1 (which will later enforce safelen=1) in simd loops
with data sharing clauses referencing variable sized vars. That
is unnecessarily hard to support and very unlikely to result in
vectorized code anyway. */
if (is_simd)
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_LINEAR:
if (OMP_CLAUSE_LINEAR_ARRAY (c))
sctx.max_vf = 1;
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LASTPRIVATE:
if (is_variable_sized (OMP_CLAUSE_DECL (c)))
sctx.max_vf = 1;
break;
case OMP_CLAUSE_REDUCTION:
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF
|| is_variable_sized (OMP_CLAUSE_DECL (c)))
sctx.max_vf = 1;
break;
default:
continue;
}
/* Add a placeholder for simduid. */
if (sctx.is_simt && maybe_ne (sctx.max_vf, 1U))
sctx.simt_eargs.safe_push (NULL_TREE);
/* Do all the fixed sized types in the first pass, and the variable sized
types in the second pass. This makes sure that the scalar arguments to
the variable sized types are processed before we use them in the
variable sized operations. */
for (pass = 0; pass < 2; ++pass)
{
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
tree var, new_var;
bool by_ref;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
switch (c_kind)
{
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_PRIVATE_DEBUG (c))
continue;
break;
case OMP_CLAUSE_SHARED:
/* Ignore shared directives in teams construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
continue;
if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
{
gcc_assert (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c)
|| is_global_var (OMP_CLAUSE_DECL (c)));
continue;
}
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
break;
case OMP_CLAUSE_LINEAR:
if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c)
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
lastprivate_firstprivate = true;
break;
case OMP_CLAUSE_REDUCTION:
if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
reduction_omp_orig_ref = true;
break;
case OMP_CLAUSE__LOOPTEMP_:
/* Handle _looptemp_ clauses only on parallel/task. */
if (fd)
continue;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
{
lastprivate_firstprivate = true;
if (pass != 0 || is_taskloop_ctx (ctx))
continue;
}
/* Even without a corresponding firstprivate, if the
decl is a Fortran allocatable, it needs an outer var
reference. */
else if (pass == 0
&& lang_hooks.decls.omp_private_outer_ref
(OMP_CLAUSE_DECL (c)))
lastprivate_firstprivate = true;
break;
case OMP_CLAUSE_ALIGNED:
if (pass == 0)
continue;
var = OMP_CLAUSE_DECL (c);
if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
&& !is_global_var (var))
{
new_var = maybe_lookup_decl (var, ctx);
if (new_var == NULL_TREE)
new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
tree alarg = omp_clause_aligned_alignment (c);
alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
x = build_call_expr_loc (clause_loc, x, 2, new_var, alarg);
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
gimplify_and_add (x, ilist);
}
else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
&& is_global_var (var))
{
tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
new_var = lookup_decl (var, ctx);
t = maybe_lookup_decl_in_outer_ctx (var, ctx);
t = build_fold_addr_expr_loc (clause_loc, t);
t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
tree alarg = omp_clause_aligned_alignment (c);
alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
t = build_call_expr_loc (clause_loc, t2, 2, t, alarg);
t = fold_convert_loc (clause_loc, ptype, t);
x = create_tmp_var (ptype);
t = build2 (MODIFY_EXPR, ptype, x, t);
gimplify_and_add (t, ilist);
t = build_simple_mem_ref_loc (clause_loc, x);
SET_DECL_VALUE_EXPR (new_var, t);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
continue;
default:
continue;
}
new_var = var = OMP_CLAUSE_DECL (c);
if (c_kind == OMP_CLAUSE_REDUCTION && TREE_CODE (var) == MEM_REF)
{
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == POINTER_PLUS_EXPR)
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == INDIRECT_REF
|| TREE_CODE (var) == ADDR_EXPR)
var = TREE_OPERAND (var, 0);
if (is_variable_sized (var))
{
gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
var = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var) == INDIRECT_REF);
var = TREE_OPERAND (var, 0);
gcc_assert (DECL_P (var));
}
new_var = var;
}
if (c_kind != OMP_CLAUSE_COPYIN)
new_var = lookup_decl (var, ctx);
if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
{
if (pass != 0)
continue;
}
/* C/C++ array section reductions. */
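/* A sketch of what is generated for something like reduction (+:a[:n])
(illustrative pseudo-code, not the exact IL):

priv = alloca (n * sizeof (*a)); y1 = priv; i = 0;
body: *y1 = 0; y1++; i++; if (i <= v) goto body;
end:

plus, for simd or placeholder reductions, a second loop over y2/y4
appended to DLIST that merges each private element back. */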
else if (c_kind == OMP_CLAUSE_REDUCTION
&& var != OMP_CLAUSE_DECL (c))
{
if (pass == 0)
continue;
tree bias = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
tree orig_var = TREE_OPERAND (OMP_CLAUSE_DECL (c), 0);
if (TREE_CODE (orig_var) == POINTER_PLUS_EXPR)
{
tree b = TREE_OPERAND (orig_var, 1);
b = maybe_lookup_decl (b, ctx);
if (b == NULL)
{
b = TREE_OPERAND (orig_var, 1);
b = maybe_lookup_decl_in_outer_ctx (b, ctx);
}
if (integer_zerop (bias))
bias = b;
else
{
bias = fold_convert_loc (clause_loc,
TREE_TYPE (b), bias);
bias = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (b), b, bias);
}
orig_var = TREE_OPERAND (orig_var, 0);
}
if (TREE_CODE (orig_var) == INDIRECT_REF
|| TREE_CODE (orig_var) == ADDR_EXPR)
orig_var = TREE_OPERAND (orig_var, 0);
tree d = OMP_CLAUSE_DECL (c);
tree type = TREE_TYPE (d);
gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
const char *name = get_name (orig_var);
if (TREE_CONSTANT (v))
{
x = create_tmp_var_raw (type, name);
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
}
else
{
tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree t = maybe_lookup_decl (v, ctx);
if (t)
v = t;
else
v = maybe_lookup_decl_in_outer_ctx (v, ctx);
gimplify_expr (&v, ilist, NULL, is_gimple_val, fb_rvalue);
t = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (v), v,
build_int_cst (TREE_TYPE (v), 1));
t = fold_build2_loc (clause_loc, MULT_EXPR,
TREE_TYPE (v), t,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
tree al = size_int (TYPE_ALIGN (TREE_TYPE (type)));
x = build_call_expr_loc (clause_loc, atmp, 2, t, al);
}
tree ptype = build_pointer_type (TREE_TYPE (type));
x = fold_convert_loc (clause_loc, ptype, x);
tree y = create_tmp_var (ptype, name);
gimplify_assign (y, x, ilist);
x = y;
tree yb = y;
if (!integer_zerop (bias))
{
bias = fold_convert_loc (clause_loc, pointer_sized_int_node,
bias);
yb = fold_convert_loc (clause_loc, pointer_sized_int_node,
x);
yb = fold_build2_loc (clause_loc, MINUS_EXPR,
pointer_sized_int_node, yb, bias);
x = fold_convert_loc (clause_loc, TREE_TYPE (x), yb);
yb = create_tmp_var (ptype, name);
gimplify_assign (yb, x, ilist);
x = yb;
}
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == POINTER_PLUS_EXPR)
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == ADDR_EXPR)
{
if (orig_var != var)
{
gcc_assert (is_variable_sized (orig_var));
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var),
x);
gimplify_assign (new_var, x, ilist);
tree new_orig_var = lookup_decl (orig_var, ctx);
tree t = build_fold_indirect_ref (new_var);
DECL_IGNORED_P (new_var) = 0;
TREE_THIS_NOTRAP (t) = 1;
SET_DECL_VALUE_EXPR (new_orig_var, t);
DECL_HAS_VALUE_EXPR_P (new_orig_var) = 1;
}
else
{
x = build2 (MEM_REF, TREE_TYPE (new_var), x,
build_int_cst (ptype, 0));
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
}
else
{
gcc_assert (orig_var == var);
if (TREE_CODE (d) == INDIRECT_REF)
{
x = create_tmp_var (ptype, name);
TREE_ADDRESSABLE (x) = 1;
gimplify_assign (x, yb, ilist);
x = build_fold_addr_expr_loc (clause_loc, x);
}
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_assign (new_var, x, ilist);
}
tree y1 = create_tmp_var (ptype, NULL);
gimplify_assign (y1, y, ilist);
tree i2 = NULL_TREE, y2 = NULL_TREE;
tree body2 = NULL_TREE, end2 = NULL_TREE;
tree y3 = NULL_TREE, y4 = NULL_TREE;
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) || is_simd)
{
y2 = create_tmp_var (ptype, NULL);
gimplify_assign (y2, y, ilist);
tree ref = build_outer_var_ref (var, ctx);
/* For references, build_outer_var_ref already performs this. */
if (TREE_CODE (d) == INDIRECT_REF)
gcc_assert (omp_is_reference (var));
else if (TREE_CODE (d) == ADDR_EXPR)
ref = build_fold_addr_expr (ref);
else if (omp_is_reference (var))
ref = build_fold_addr_expr (ref);
ref = fold_convert_loc (clause_loc, ptype, ref);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
&& OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
{
y3 = create_tmp_var (ptype, NULL);
gimplify_assign (y3, unshare_expr (ref), ilist);
}
if (is_simd)
{
y4 = create_tmp_var (ptype, NULL);
gimplify_assign (y4, ref, dlist);
}
}
tree i = create_tmp_var (TREE_TYPE (v), NULL);
gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), ilist);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (ilist, gimple_build_label (body));
if (y2)
{
i2 = create_tmp_var (TREE_TYPE (v), NULL);
gimplify_assign (i2, build_int_cst (TREE_TYPE (v), 0), dlist);
body2 = create_artificial_label (UNKNOWN_LOCATION);
end2 = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (dlist, gimple_build_label (body2));
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree decl_placeholder
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
SET_DECL_VALUE_EXPR (decl_placeholder,
build_simple_mem_ref (y1));
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
SET_DECL_VALUE_EXPR (placeholder,
y3 ? build_simple_mem_ref (y3)
: error_mark_node);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
x = lang_hooks.decls.omp_clause_default_ctor
(c, build_simple_mem_ref (y1),
y3 ? build_simple_mem_ref (y3) : NULL_TREE);
if (x)
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (ilist, tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (is_simd)
{
SET_DECL_VALUE_EXPR (decl_placeholder,
build_simple_mem_ref (y2));
SET_DECL_VALUE_EXPR (placeholder,
build_simple_mem_ref (y4));
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (dlist, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 0;
x = lang_hooks.decls.omp_clause_dtor
(c, build_simple_mem_ref (y2));
if (x)
{
gimple_seq tseq = NULL;
dtor = x;
gimplify_stmt (&dtor, &tseq);
gimple_seq_add_seq (dlist, tseq);
}
}
else
{
x = omp_reduction_init (c, TREE_TYPE (type));
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
/* reduction(-:var) sums up the partial results, so it
acts identically to reduction(+:var). */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
gimplify_assign (build_simple_mem_ref (y1), x, ilist);
if (is_simd)
{
x = build2 (code, TREE_TYPE (type),
build_simple_mem_ref (y4),
build_simple_mem_ref (y2));
gimplify_assign (build_simple_mem_ref (y4), x, dlist);
}
}
gimple *g
= gimple_build_assign (y1, POINTER_PLUS_EXPR, y1,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (ilist, g);
if (y3)
{
g = gimple_build_assign (y3, POINTER_PLUS_EXPR, y3,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (ilist, g);
}
g = gimple_build_assign (i, PLUS_EXPR, i,
build_int_cst (TREE_TYPE (i), 1));
gimple_seq_add_stmt (ilist, g);
g = gimple_build_cond (LE_EXPR, i, v, body, end);
gimple_seq_add_stmt (ilist, g);
gimple_seq_add_stmt (ilist, gimple_build_label (end));
if (y2)
{
g = gimple_build_assign (y2, POINTER_PLUS_EXPR, y2,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (dlist, g);
if (y4)
{
g = gimple_build_assign
(y4, POINTER_PLUS_EXPR, y4,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (dlist, g);
}
g = gimple_build_assign (i2, PLUS_EXPR, i2,
build_int_cst (TREE_TYPE (i2), 1));
gimple_seq_add_stmt (dlist, g);
g = gimple_build_cond (LE_EXPR, i2, v, body2, end2);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (end2));
}
continue;
}
else if (is_variable_sized (var))
{
/* For variable sized types, we need to allocate the
actual storage here. Call alloca and store the
result in the pointer decl that we created elsewhere. */
if (pass == 0)
continue;
if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
{
gcall *stmt;
tree tmp, atmp;
ptr = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
ptr = TREE_OPERAND (ptr, 0);
gcc_assert (DECL_P (ptr));
x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
/* void *tmp = __builtin_alloca */
atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
stmt = gimple_build_call (atmp, 2, x,
size_int (DECL_ALIGN (var)));
tmp = create_tmp_var_raw (ptr_type_node);
gimple_add_tmp_var (tmp);
gimple_call_set_lhs (stmt, tmp);
gimple_seq_add_stmt (ilist, stmt);
x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
gimplify_assign (ptr, x, ilist);
}
}
else if (omp_is_reference (var))
{
/* For references that are being privatized for Fortran,
allocate new backing storage for the new pointer
variable. This allows us to avoid changing all the
code that expects a pointer to something that expects
a direct variable. */
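/* Sketch (names illustrative): for a Fortran dummy privatized as a
reference
integer *a;
we allocate fresh backing storage and repoint the private copy,
integer a_storage; // or alloca for non-constant sizes
a_priv = &a_storage;
so downstream code that dereferences A keeps working unchanged. */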
if (pass == 0)
continue;
x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
{
x = build_receiver_ref (var, false, ctx);
x = build_fold_addr_expr_loc (clause_loc, x);
}
else if (TREE_CONSTANT (x))
{
/* For a reduction in a SIMD loop, defer adding the
initialization of the reference, because if we decide
to use a SIMD array for it, the initialization could
cause an expansion ICE. */
if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
x = NULL_TREE;
else
{
x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
get_name (var));
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
}
}
else
{
tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree rtype = TREE_TYPE (TREE_TYPE (new_var));
tree al = size_int (TYPE_ALIGN (rtype));
x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
}
if (x)
{
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_assign (new_var, x, ilist);
}
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
}
else if (c_kind == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
if (pass == 0)
continue;
}
else if (pass != 0)
continue;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
/* Ignore shared directives in a teams construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
continue;
/* Shared global vars are just accessed directly. */
if (is_global_var (new_var))
break;
/* For taskloop firstprivate/lastprivate, represented
as firstprivate and shared clause on the task, new_var
is the firstprivate var. */
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
break;
/* Set up the DECL_VALUE_EXPR for shared variables now. This
needs to be delayed until after fixup_child_record_type so
that we get the correct type during the dereference. */
by_ref = use_pointer_for_field (var, ctx);
x = build_receiver_ref (var, by_ref, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
/* ??? If VAR is not passed by reference, and the variable
hasn't been initialized yet, then we'll get a warning for
the store into the omp_data_s structure. Ideally, we'd be
able to notice this and not store anything at all, but
we're generating code too early. Suppress the warning. */
if (!by_ref)
TREE_NO_WARNING (var) = 1;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
x = build_outer_var_ref (var, ctx);
else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
{
if (is_task_ctx (ctx))
x = build_receiver_ref (var, false, ctx);
else
x = build_outer_var_ref (var, ctx, OMP_CLAUSE_PRIVATE);
}
else
x = NULL;
do_private:
tree nx;
nx = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (new_var), x);
if (is_simd)
{
tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
if ((TREE_ADDRESSABLE (new_var) || nx || y
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
if (nx)
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (ivar), x);
if (nx && x)
gimplify_and_add (x, &llist[0]);
if (y)
{
y = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (y)
{
gimple_seq tseq = NULL;
dtor = y;
gimplify_stmt (&dtor, &tseq);
gimple_seq_add_seq (&llist[1], tseq);
}
}
break;
}
}
if (nx)
gimplify_and_add (nx, ilist);
/* FALLTHRU */
do_dtor:
x = lang_hooks.decls.omp_clause_dtor (c, new_var);
if (x)
{
gimple_seq tseq = NULL;
dtor = x;
gimplify_stmt (&dtor, &tseq);
gimple_seq_add_seq (dlist, tseq);
}
break;
case OMP_CLAUSE_LINEAR:
if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
goto do_firstprivate;
if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
x = NULL;
else
x = build_outer_var_ref (var, ctx);
goto do_private;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_task_ctx (ctx))
{
if (omp_is_reference (var) || is_variable_sized (var))
goto do_dtor;
else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
ctx))
|| use_pointer_for_field (var, NULL))
{
x = build_receiver_ref (var, false, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
goto do_dtor;
}
}
do_firstprivate:
x = build_outer_var_ref (var, ctx);
if (is_simd)
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& gimple_omp_for_combined_into_p (ctx->stmt))
{
tree t = OMP_CLAUSE_LINEAR_STEP (c);
tree stept = TREE_TYPE (t);
tree ct = omp_find_clause (clauses,
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (ct);
tree l = OMP_CLAUSE_DECL (ct);
tree n1 = fd->loop.n1;
tree step = fd->loop.step;
tree itype = TREE_TYPE (l);
if (POINTER_TYPE_P (itype))
itype = signed_type_for (itype);
l = fold_build2 (MINUS_EXPR, itype, l, n1);
if (TYPE_UNSIGNED (itype)
&& fd->loop.cond_code == GT_EXPR)
l = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, l),
fold_build1 (NEGATE_EXPR,
itype, step));
else
l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
t = fold_build2 (MULT_EXPR, stept,
fold_convert (stept, l), t);
if (OMP_CLAUSE_LINEAR_ARRAY (c))
{
x = lang_hooks.decls.omp_clause_linear_ctor
(c, new_var, x, t);
gimplify_and_add (x, ilist);
goto do_dtor;
}
if (POINTER_TYPE_P (TREE_TYPE (x)))
x = fold_build2 (POINTER_PLUS_EXPR,
TREE_TYPE (x), x, t);
else
x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
}
if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
|| TREE_ADDRESSABLE (new_var))
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
{
tree iv = create_tmp_var (TREE_TYPE (new_var));
x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
gimplify_and_add (x, ilist);
gimple_stmt_iterator gsi
= gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
gassign *g
= gimple_build_assign (unshare_expr (lvar), iv);
gsi_insert_before_without_update (&gsi, g,
GSI_SAME_STMT);
tree t = OMP_CLAUSE_LINEAR_STEP (c);
enum tree_code code = PLUS_EXPR;
if (POINTER_TYPE_P (TREE_TYPE (new_var)))
code = POINTER_PLUS_EXPR;
g = gimple_build_assign (iv, code, iv, t);
gsi_insert_before_without_update (&gsi, g,
GSI_SAME_STMT);
break;
}
x = lang_hooks.decls.omp_clause_copy_ctor
(c, unshare_expr (ivar), x);
gimplify_and_add (x, &llist[0]);
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
{
gimple_seq tseq = NULL;
dtor = x;
gimplify_stmt (&dtor, &tseq);
gimple_seq_add_seq (&llist[1], tseq);
}
break;
}
}
x = lang_hooks.decls.omp_clause_copy_ctor
(c, unshare_expr (new_var), x);
gimplify_and_add (x, ilist);
goto do_dtor;
case OMP_CLAUSE__LOOPTEMP_:
gcc_assert (is_taskreg_ctx (ctx));
x = build_outer_var_ref (var, ctx);
x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
gimplify_and_add (x, ilist);
break;
case OMP_CLAUSE_COPYIN:
by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
append_to_statement_list (x, &copyin_seq);
copyin_by_ref |= by_ref;
break;
case OMP_CLAUSE_REDUCTION:
/* OpenACC reductions are initialized using the
GOACC_REDUCTION internal function. */
if (is_gimple_omp_oacc (ctx->stmt))
break;
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
gimple *tseq;
x = build_outer_var_ref (var, ctx);
if (omp_is_reference (var)
&& !useless_type_conversion_p (TREE_TYPE (placeholder),
TREE_TYPE (x)))
x = build_fold_addr_expr_loc (clause_loc, x);
SET_DECL_VALUE_EXPR (placeholder, x);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
tree new_vard = new_var;
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
}
if (is_simd
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
if (new_vard == new_var)
{
gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
SET_DECL_VALUE_EXPR (new_var, ivar);
}
else
{
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (ivar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (ivar),
build_outer_var_ref (var, ctx));
if (x)
gimplify_and_add (x, &llist[0]);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&llist[0], tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&llist[1], tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (new_vard == new_var)
SET_DECL_VALUE_EXPR (new_var, lvar);
else
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
{
tseq = NULL;
dtor = x;
gimplify_stmt (&dtor, &tseq);
gimple_seq_add_seq (&llist[1], tseq);
}
break;
}
/* If this is a reference to a constant-size reduction var
with a placeholder, we haven't emitted its initializer yet,
because doing so is undesirable if SIMD arrays end up being
used. But if they aren't used, we need to emit the deferred
initialization now. */
else if (omp_is_reference (var) && is_simd)
handle_simd_reference (clause_loc, new_vard, ilist);
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (new_var),
build_outer_var_ref (var, ctx));
if (x)
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (ilist, tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (is_simd)
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (dlist, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
goto do_dtor;
}
else
{
x = omp_reduction_init (c, TREE_TYPE (new_var));
gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
/* reduction(-:var) sums up the partial results, so it
acts identically to reduction(+:var). */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
tree new_vard = new_var;
if (is_simd && omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
}
if (is_simd
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
tree ref = build_outer_var_ref (var, ctx);
gimplify_assign (unshare_expr (ivar), x, &llist[0]);
if (sctx.is_simt)
{
if (!simt_lane)
simt_lane = create_tmp_var (unsigned_type_node);
x = build_call_expr_internal_loc
(UNKNOWN_LOCATION, IFN_GOMP_SIMT_XCHG_BFLY,
TREE_TYPE (ivar), 2, ivar, simt_lane);
x = build2 (code, TREE_TYPE (ivar), ivar, x);
gimplify_assign (ivar, x, &llist[2]);
}
x = build2 (code, TREE_TYPE (ref), ref, ivar);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, x, &llist[1]);
if (new_vard != new_var)
{
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
}
else
{
if (omp_is_reference (var) && is_simd)
handle_simd_reference (clause_loc, new_vard, ilist);
gimplify_assign (new_var, x, ilist);
if (is_simd)
{
tree ref = build_outer_var_ref (var, ctx);
x = build2 (code, TREE_TYPE (ref), ref, new_var);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, x, dlist);
}
}
}
break;
default:
gcc_unreachable ();
}
}
}
if (known_eq (sctx.max_vf, 1U))
sctx.is_simt = false;
if (sctx.lane || sctx.is_simt)
{
uid = create_tmp_var (ptr_type_node, "simduid");
/* We don't want uninit warnings on simduid; it is always uninitialized,
but we use it only for its DECL_UID, never for its value. */
TREE_NO_WARNING (uid) = 1;
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
OMP_CLAUSE__SIMDUID__DECL (c) = uid;
OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
gimple_omp_for_set_clauses (ctx->stmt, c);
}
/* Emit calls denoting privatized variables and initializing a pointer to
the structure that holds private variables as fields, to be resolved
after the ompdevlow pass. */
if (sctx.is_simt)
{
sctx.simt_eargs[0] = uid;
gimple *g
= gimple_build_call_internal_vec (IFN_GOMP_SIMT_ENTER, sctx.simt_eargs);
gimple_call_set_lhs (g, uid);
gimple_seq_add_stmt (ilist, g);
sctx.simt_eargs.release ();
simtrec = create_tmp_var (ptr_type_node, ".omp_simt");
g = gimple_build_call_internal (IFN_GOMP_SIMT_ENTER_ALLOC, 1, uid);
gimple_call_set_lhs (g, simtrec);
gimple_seq_add_stmt (ilist, g);
}
if (sctx.lane)
{
gimple *g
= gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
gimple_call_set_lhs (g, sctx.lane);
gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
g = gimple_build_assign (sctx.lane, INTEGER_CST,
build_int_cst (unsigned_type_node, 0));
gimple_seq_add_stmt (ilist, g);
/* Emit reductions across SIMT lanes in log_2(simt_vf) steps. */
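/* Sketch of the loop built below (names illustrative):
simt_vf = GOMP_SIMT_VF ();
simt_lane = 1;
goto header;
body:
tmp = GOMP_SIMT_XCHG_BFLY (red, simt_lane); // from llist[2]
red = red OP tmp;
simt_lane <<= 1;
header:
if (simt_lane < simt_vf) goto body; */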
if (llist[2])
{
tree simt_vf = create_tmp_var (unsigned_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMT_VF, 0);
gimple_call_set_lhs (g, simt_vf);
gimple_seq_add_stmt (dlist, g);
tree t = build_int_cst (unsigned_type_node, 1);
g = gimple_build_assign (simt_lane, INTEGER_CST, t);
gimple_seq_add_stmt (dlist, g);
t = build_int_cst (unsigned_type_node, 0);
g = gimple_build_assign (sctx.idx, INTEGER_CST, t);
gimple_seq_add_stmt (dlist, g);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree header = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (dlist, gimple_build_goto (header));
gimple_seq_add_stmt (dlist, gimple_build_label (body));
gimple_seq_add_seq (dlist, llist[2]);
g = gimple_build_assign (simt_lane, LSHIFT_EXPR, simt_lane, integer_one_node);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (header));
g = gimple_build_cond (LT_EXPR, simt_lane, simt_vf, body, end);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (end));
}
for (int i = 0; i < 2; i++)
if (llist[i])
{
tree vf = create_tmp_var (unsigned_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
gimple_call_set_lhs (g, vf);
gimple_seq *seq = i == 0 ? ilist : dlist;
gimple_seq_add_stmt (seq, g);
tree t = build_int_cst (unsigned_type_node, 0);
g = gimple_build_assign (sctx.idx, INTEGER_CST, t);
gimple_seq_add_stmt (seq, g);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree header = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (seq, gimple_build_goto (header));
gimple_seq_add_stmt (seq, gimple_build_label (body));
gimple_seq_add_seq (seq, llist[i]);
t = build_int_cst (unsigned_type_node, 1);
g = gimple_build_assign (sctx.idx, PLUS_EXPR, sctx.idx, t);
gimple_seq_add_stmt (seq, g);
gimple_seq_add_stmt (seq, gimple_build_label (header));
g = gimple_build_cond (LT_EXPR, sctx.idx, vf, body, end);
gimple_seq_add_stmt (seq, g);
gimple_seq_add_stmt (seq, gimple_build_label (end));
}
}
if (sctx.is_simt)
{
gimple_seq_add_seq (dlist, sctx.simt_dlist);
gimple *g
= gimple_build_call_internal (IFN_GOMP_SIMT_EXIT, 1, simtrec);
gimple_seq_add_stmt (dlist, g);
}
/* The copyin sequence is not to be executed by the main thread, since
that would result in self-copies. Such a self-copy may be invisible
for scalars, but it certainly is visible to a C++ operator=. */
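/* Sketch of the guard emitted below (illustrative):
if (omp_get_thread_num () != 0)
<copyin_seq>; */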
if (copyin_seq)
{
x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
0);
x = build2 (NE_EXPR, boolean_type_node, x,
build_int_cst (TREE_TYPE (x), 0));
x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
gimplify_and_add (x, ilist);
}
/* If any copyin variable is passed by reference, we must ensure the
master thread doesn't modify it before it is copied over in all
threads. Similarly for variables in both firstprivate and
lastprivate clauses we need to ensure the lastprivate copying
happens after firstprivate copying in all threads. And similarly
for UDRs if initializer expression refers to omp_orig. */
if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
{
/* Don't add any barrier for #pragma omp simd or
#pragma omp distribute. */
if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
gimple_seq_add_stmt (ilist, omp_build_barrier (NULL_TREE));
}
/* If max_vf is non-zero, then we can use only a vectorization factor
up to the max_vf we chose. So stick it into the safelen clause. */
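/* E.g. (illustrative) with sctx.max_vf == 16: no safelen clause, or
an existing safelen(32), gets a safelen(16) clause prepended, while
an existing safelen(8) is already at least as strict and is left
alone. */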
if (maybe_ne (sctx.max_vf, 0U))
{
tree c = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_SAFELEN);
poly_uint64 safe_len;
if (c == NULL_TREE
|| (poly_int_tree_p (OMP_CLAUSE_SAFELEN_EXPR (c), &safe_len)
&& maybe_gt (safe_len, sctx.max_vf)))
{
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
sctx.max_vf);
OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
gimple_omp_for_set_clauses (ctx->stmt, c);
}
}
}
/* Generate code to implement the LASTPRIVATE clauses. This is used for
both parallel and workshare constructs. PREDICATE may be NULL if it's
always true. */
static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
omp_context *ctx)
{
tree x, c, label = NULL, orig_clauses = clauses;
bool par_clauses = false;
tree simduid = NULL, lastlane = NULL, simtcond = NULL, simtlast = NULL;
/* Early exit if there are no lastprivate or linear clauses. */
for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
|| (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
break;
if (clauses == NULL)
{
/* If this was a workshare clause, see if it had been combined
with its parallel. In that case, look for the clauses on the
parallel statement itself. */
if (is_parallel_ctx (ctx))
return;
ctx = ctx->outer;
if (ctx == NULL || !is_parallel_ctx (ctx))
return;
clauses = omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
OMP_CLAUSE_LASTPRIVATE);
if (clauses == NULL)
return;
par_clauses = true;
}
bool maybe_simt = false;
if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
{
maybe_simt = omp_find_clause (orig_clauses, OMP_CLAUSE__SIMT_);
simduid = omp_find_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
if (simduid)
simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
}
if (predicate)
{
gcond *stmt;
tree label_true, arm1, arm2;
enum tree_code pred_code = TREE_CODE (predicate);
label = create_artificial_label (UNKNOWN_LOCATION);
label_true = create_artificial_label (UNKNOWN_LOCATION);
if (TREE_CODE_CLASS (pred_code) == tcc_comparison)
{
arm1 = TREE_OPERAND (predicate, 0);
arm2 = TREE_OPERAND (predicate, 1);
gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
}
else
{
arm1 = predicate;
gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
arm2 = boolean_false_node;
pred_code = NE_EXPR;
}
if (maybe_simt)
{
c = build2 (pred_code, boolean_type_node, arm1, arm2);
c = fold_convert (integer_type_node, c);
simtcond = create_tmp_var (integer_type_node);
gimplify_assign (simtcond, c, stmt_list);
gcall *g = gimple_build_call_internal (IFN_GOMP_SIMT_VOTE_ANY,
1, simtcond);
c = create_tmp_var (integer_type_node);
gimple_call_set_lhs (g, c);
gimple_seq_add_stmt (stmt_list, g);
stmt = gimple_build_cond (NE_EXPR, c, integer_zero_node,
label_true, label);
}
else
stmt = gimple_build_cond (pred_code, arm1, arm2, label_true, label);
gimple_seq_add_stmt (stmt_list, stmt);
gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
}
for (c = clauses; c ;)
{
tree var, new_var;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
|| (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
{
var = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
&& is_taskloop_ctx (ctx))
{
gcc_checking_assert (ctx->outer && is_task_ctx (ctx->outer));
new_var = lookup_decl (var, ctx->outer);
}
else
{
new_var = lookup_decl (var, ctx);
/* Avoid uninitialized warnings for lastprivate and
for linear iterators. */
if (predicate
&& (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
|| OMP_CLAUSE_LINEAR_NO_COPYIN (c)))
TREE_NO_WARNING (new_var) = 1;
}
if (!maybe_simt && simduid && DECL_HAS_VALUE_EXPR_P (new_var))
{
tree val = DECL_VALUE_EXPR (new_var);
if (TREE_CODE (val) == ARRAY_REF
&& VAR_P (TREE_OPERAND (val, 0))
&& lookup_attribute ("omp simd array",
DECL_ATTRIBUTES (TREE_OPERAND (val,
0))))
{
if (lastlane == NULL)
{
lastlane = create_tmp_var (unsigned_type_node);
gcall *g
= gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
2, simduid,
TREE_OPERAND (val, 1));
gimple_call_set_lhs (g, lastlane);
gimple_seq_add_stmt (stmt_list, g);
}
new_var = build4 (ARRAY_REF, TREE_TYPE (val),
TREE_OPERAND (val, 0), lastlane,
NULL_TREE, NULL_TREE);
}
}
else if (maybe_simt)
{
tree val = (DECL_HAS_VALUE_EXPR_P (new_var)
? DECL_VALUE_EXPR (new_var)
: new_var);
if (simtlast == NULL)
{
simtlast = create_tmp_var (unsigned_type_node);
gcall *g = gimple_build_call_internal
(IFN_GOMP_SIMT_LAST_LANE, 1, simtcond);
gimple_call_set_lhs (g, simtlast);
gimple_seq_add_stmt (stmt_list, g);
}
x = build_call_expr_internal_loc
(UNKNOWN_LOCATION, IFN_GOMP_SIMT_XCHG_IDX,
TREE_TYPE (val), 2, val, simtlast);
new_var = unshare_expr (new_var);
gimplify_assign (new_var, x, stmt_list);
new_var = unshare_expr (new_var);
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
{
lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
gimple_seq_add_seq (stmt_list,
OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
{
lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
gimple_seq_add_seq (stmt_list,
OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
}
x = NULL_TREE;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_TASKLOOP_IV (c))
{
gcc_checking_assert (is_taskloop_ctx (ctx));
tree ovar = maybe_lookup_decl_in_outer_ctx (var,
ctx->outer->outer);
if (is_global_var (ovar))
x = ovar;
}
if (!x)
x = build_outer_var_ref (var, ctx, OMP_CLAUSE_LASTPRIVATE);
if (omp_is_reference (var))
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
gimplify_and_add (x, stmt_list);
}
c = OMP_CLAUSE_CHAIN (c);
if (c == NULL && !par_clauses)
{
/* If this was a workshare clause, see if it had been combined
with its parallel. In that case, continue looking for the
clauses also on the parallel statement itself. */
if (is_parallel_ctx (ctx))
break;
ctx = ctx->outer;
if (ctx == NULL || !is_parallel_ctx (ctx))
break;
c = omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
OMP_CLAUSE_LASTPRIVATE);
par_clauses = true;
}
}
if (label)
gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}
/* Lower the OpenACC reductions of CLAUSES for compute axis LEVEL
(which might be a placeholder). INNER is true if this is an inner
axis of a multi-axis loop. FORK and JOIN are (optional) fork and
join markers. Generate the before-loop forking sequence in
FORK_SEQ and the after-loop joining sequence to JOIN_SEQ. The
general form of these sequences is
GOACC_REDUCTION_SETUP
GOACC_FORK
GOACC_REDUCTION_INIT
...
GOACC_REDUCTION_FINI
GOACC_JOIN
GOACC_REDUCTION_TEARDOWN. */
static void
lower_oacc_reductions (location_t loc, tree clauses, tree level, bool inner,
gcall *fork, gcall *join, gimple_seq *fork_seq,
gimple_seq *join_seq, omp_context *ctx)
{
gimple_seq before_fork = NULL;
gimple_seq after_fork = NULL;
gimple_seq before_join = NULL;
gimple_seq after_join = NULL;
tree init_code = NULL_TREE, fini_code = NULL_TREE,
setup_code = NULL_TREE, teardown_code = NULL_TREE;
unsigned offset = 0;
for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
{
tree orig = OMP_CLAUSE_DECL (c);
tree var = maybe_lookup_decl (orig, ctx);
tree ref_to_res = NULL_TREE;
tree incoming, outgoing, v1, v2, v3;
bool is_private = false;
enum tree_code rcode = OMP_CLAUSE_REDUCTION_CODE (c);
if (rcode == MINUS_EXPR)
rcode = PLUS_EXPR;
else if (rcode == TRUTH_ANDIF_EXPR)
rcode = BIT_AND_EXPR;
else if (rcode == TRUTH_ORIF_EXPR)
rcode = BIT_IOR_EXPR;
tree op = build_int_cst (unsigned_type_node, rcode);
if (!var)
var = orig;
incoming = outgoing = var;
if (!inner)
{
/* See if an outer construct also reduces this variable. */
omp_context *outer = ctx;
while (omp_context *probe = outer->outer)
{
enum gimple_code type = gimple_code (probe->stmt);
tree cls;
switch (type)
{
case GIMPLE_OMP_FOR:
cls = gimple_omp_for_clauses (probe->stmt);
break;
case GIMPLE_OMP_TARGET:
if (gimple_omp_target_kind (probe->stmt)
!= GF_OMP_TARGET_KIND_OACC_PARALLEL)
goto do_lookup;
cls = gimple_omp_target_clauses (probe->stmt);
break;
default:
goto do_lookup;
}
outer = probe;
for (; cls; cls = OMP_CLAUSE_CHAIN (cls))
if (OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_REDUCTION
&& orig == OMP_CLAUSE_DECL (cls))
{
incoming = outgoing = lookup_decl (orig, probe);
goto has_outer_reduction;
}
else if ((OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_PRIVATE)
&& orig == OMP_CLAUSE_DECL (cls))
{
is_private = true;
goto do_lookup;
}
}
do_lookup:
/* This is the outermost construct with this reduction;
see if there's a mapping for it. */
if (gimple_code (outer->stmt) == GIMPLE_OMP_TARGET
&& maybe_lookup_field (orig, outer) && !is_private)
{
ref_to_res = build_receiver_ref (orig, false, outer);
if (omp_is_reference (orig))
ref_to_res = build_simple_mem_ref (ref_to_res);
tree type = TREE_TYPE (var);
if (POINTER_TYPE_P (type))
type = TREE_TYPE (type);
outgoing = var;
incoming = omp_reduction_init_op (loc, rcode, type);
}
else
{
/* Try to look at enclosing contexts for reduction var,
use original if no mapping found. */
tree t = NULL_TREE;
omp_context *c = ctx->outer;
while (c && !t)
{
t = maybe_lookup_decl (orig, c);
c = c->outer;
}
incoming = outgoing = (t ? t : orig);
}
has_outer_reduction:;
}
if (!ref_to_res)
ref_to_res = integer_zero_node;
if (omp_is_reference (orig))
{
tree type = TREE_TYPE (var);
const char *id = IDENTIFIER_POINTER (DECL_NAME (var));
if (!inner)
{
tree x = create_tmp_var (TREE_TYPE (type), id);
gimplify_assign (var, build_fold_addr_expr (x), fork_seq);
}
v1 = create_tmp_var (type, id);
v2 = create_tmp_var (type, id);
v3 = create_tmp_var (type, id);
gimplify_assign (v1, var, fork_seq);
gimplify_assign (v2, var, fork_seq);
gimplify_assign (v3, var, fork_seq);
var = build_simple_mem_ref (var);
v1 = build_simple_mem_ref (v1);
v2 = build_simple_mem_ref (v2);
v3 = build_simple_mem_ref (v3);
outgoing = build_simple_mem_ref (outgoing);
if (!TREE_CONSTANT (incoming))
incoming = build_simple_mem_ref (incoming);
}
else
v1 = v2 = v3 = var;
/* Determine the position in the reduction buffer, which may be
used by the target. The parser has ensured that this is not a
variable-sized type. */
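/* E.g. (illustrative) a running offset of 6 with a 4-byte mode
alignment rounds up to 8 before this variable's slot is taken. */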
fixed_size_mode mode
= as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (var)));
unsigned align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
offset = (offset + align - 1) & ~(align - 1);
tree off = build_int_cst (sizetype, offset);
offset += GET_MODE_SIZE (mode);
if (!init_code)
{
init_code = build_int_cst (integer_type_node,
IFN_GOACC_REDUCTION_INIT);
fini_code = build_int_cst (integer_type_node,
IFN_GOACC_REDUCTION_FINI);
setup_code = build_int_cst (integer_type_node,
IFN_GOACC_REDUCTION_SETUP);
teardown_code = build_int_cst (integer_type_node,
IFN_GOACC_REDUCTION_TEARDOWN);
}
tree setup_call
= build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
TREE_TYPE (var), 6, setup_code,
unshare_expr (ref_to_res),
incoming, level, op, off);
tree init_call
= build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
TREE_TYPE (var), 6, init_code,
unshare_expr (ref_to_res),
v1, level, op, off);
tree fini_call
= build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
TREE_TYPE (var), 6, fini_code,
unshare_expr (ref_to_res),
v2, level, op, off);
tree teardown_call
= build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
TREE_TYPE (var), 6, teardown_code,
ref_to_res, v3, level, op, off);
gimplify_assign (v1, setup_call, &before_fork);
gimplify_assign (v2, init_call, &after_fork);
gimplify_assign (v3, fini_call, &before_join);
gimplify_assign (outgoing, teardown_call, &after_join);
}
/* Now stitch things together. */
gimple_seq_add_seq (fork_seq, before_fork);
if (fork)
gimple_seq_add_stmt (fork_seq, fork);
gimple_seq_add_seq (fork_seq, after_fork);
gimple_seq_add_seq (join_seq, before_join);
if (join)
gimple_seq_add_stmt (join_seq, join);
gimple_seq_add_seq (join_seq, after_join);
}
/* Generate code to implement the REDUCTION clauses. */
static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
gimple_seq sub_seq = NULL;
gimple *stmt;
tree x, c;
int count = 0;
/* OpenACC loop reductions are handled elsewhere. */
if (is_gimple_omp_oacc (ctx->stmt))
return;
/* SIMD reductions are handled in lower_rec_input_clauses. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
return;
/* First see if there is exactly one reduction clause. Use OMP_ATOMIC
update in that case, otherwise use a lock. */
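/* Sketch (illustrative): a lone scalar reduction(+:x) lowers to an
atomic update,
#pragma omp atomic
x_outer += x_priv;
while several clauses (or UDRs and array sections) are merged under
one lock,
GOMP_atomic_start ();
x_outer += x_priv; y_outer *= y_priv;
GOMP_atomic_end (); */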
for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
{
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
|| TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
{
/* Never use OMP_ATOMIC for array reductions or UDRs. */
count = -1;
break;
}
count++;
}
if (count == 0)
return;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
tree var, ref, new_var, orig_var;
enum tree_code code;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
continue;
enum omp_clause_code ccode = OMP_CLAUSE_REDUCTION;
orig_var = var = OMP_CLAUSE_DECL (c);
if (TREE_CODE (var) == MEM_REF)
{
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == POINTER_PLUS_EXPR)
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == ADDR_EXPR)
var = TREE_OPERAND (var, 0);
else
{
/* If this is a pointer- or reference-based array
section, the var could be private in the outer
context, e.g. on an orphaned loop construct. Pretend
this is a private variable's outer reference. */
ccode = OMP_CLAUSE_PRIVATE;
if (TREE_CODE (var) == INDIRECT_REF)
var = TREE_OPERAND (var, 0);
}
orig_var = var;
if (is_variable_sized (var))
{
gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
var = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var) == INDIRECT_REF);
var = TREE_OPERAND (var, 0);
gcc_assert (DECL_P (var));
}
}
new_var = lookup_decl (var, ctx);
if (var == OMP_CLAUSE_DECL (c) && omp_is_reference (var))
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
ref = build_outer_var_ref (var, ctx, ccode);
code = OMP_CLAUSE_REDUCTION_CODE (c);
/* reduction(-:var) sums up the partial results, so it acts
identically to reduction(+:var). */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
if (count == 1)
{
tree addr = build_fold_addr_expr_loc (clause_loc, ref);
addr = save_expr (addr);
ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
x = build2 (OMP_ATOMIC, void_type_node, addr, x);
gimplify_and_add (x, stmt_seqp);
return;
}
else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
{
tree d = OMP_CLAUSE_DECL (c);
tree type = TREE_TYPE (d);
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
tree i = create_tmp_var (TREE_TYPE (v), NULL);
tree ptype = build_pointer_type (TREE_TYPE (type));
tree bias = TREE_OPERAND (d, 1);
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == POINTER_PLUS_EXPR)
{
tree b = TREE_OPERAND (d, 1);
b = maybe_lookup_decl (b, ctx);
if (b == NULL)
{
b = TREE_OPERAND (d, 1);
b = maybe_lookup_decl_in_outer_ctx (b, ctx);
}
if (integer_zerop (bias))
bias = b;
else
{
bias = fold_convert_loc (clause_loc, TREE_TYPE (b), bias);
bias = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (b), b, bias);
}
d = TREE_OPERAND (d, 0);
}
/* For REF, build_outer_var_ref has already performed this, so
only NEW_VAR needs a dereference. */
if (TREE_CODE (d) == INDIRECT_REF)
{
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
gcc_assert (omp_is_reference (var) && var == orig_var);
}
else if (TREE_CODE (d) == ADDR_EXPR)
{
if (orig_var == var)
{
new_var = build_fold_addr_expr (new_var);
ref = build_fold_addr_expr (ref);
}
}
else
{
gcc_assert (orig_var == var);
if (omp_is_reference (var))
ref = build_fold_addr_expr (ref);
}
if (DECL_P (v))
{
tree t = maybe_lookup_decl (v, ctx);
if (t)
v = t;
else
v = maybe_lookup_decl_in_outer_ctx (v, ctx);
gimplify_expr (&v, stmt_seqp, NULL, is_gimple_val, fb_rvalue);
}
if (!integer_zerop (bias))
{
bias = fold_convert_loc (clause_loc, sizetype, bias);
new_var = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
TREE_TYPE (new_var), new_var,
unshare_expr (bias));
ref = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
TREE_TYPE (ref), ref, bias);
}
new_var = fold_convert_loc (clause_loc, ptype, new_var);
ref = fold_convert_loc (clause_loc, ptype, ref);
tree m = create_tmp_var (ptype, NULL);
gimplify_assign (m, new_var, stmt_seqp);
new_var = m;
m = create_tmp_var (ptype, NULL);
gimplify_assign (m, ref, stmt_seqp);
ref = m;
gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), stmt_seqp);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (&sub_seq, gimple_build_label (body));
tree priv = build_simple_mem_ref_loc (clause_loc, new_var);
tree out = build_simple_mem_ref_loc (clause_loc, ref);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree decl_placeholder
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
SET_DECL_VALUE_EXPR (placeholder, out);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
SET_DECL_VALUE_EXPR (decl_placeholder, priv);
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
gimple_seq_add_seq (&sub_seq,
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = NULL;
}
else
{
x = build2 (code, TREE_TYPE (out), out, priv);
out = unshare_expr (out);
gimplify_assign (out, x, &sub_seq);
}
gimple *g = gimple_build_assign (new_var, POINTER_PLUS_EXPR, new_var,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (&sub_seq, g);
g = gimple_build_assign (ref, POINTER_PLUS_EXPR, ref,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (&sub_seq, g);
g = gimple_build_assign (i, PLUS_EXPR, i,
build_int_cst (TREE_TYPE (i), 1));
gimple_seq_add_stmt (&sub_seq, g);
g = gimple_build_cond (LE_EXPR, i, v, body, end);
gimple_seq_add_stmt (&sub_seq, g);
gimple_seq_add_stmt (&sub_seq, gimple_build_label (end));
}
else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
if (omp_is_reference (var)
&& !useless_type_conversion_p (TREE_TYPE (placeholder),
TREE_TYPE (ref)))
ref = build_fold_addr_expr_loc (clause_loc, ref);
SET_DECL_VALUE_EXPR (placeholder, ref);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
}
else
{
x = build2 (code, TREE_TYPE (ref), ref, new_var);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, x, &sub_seq);
}
}
stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
0);
gimple_seq_add_stmt (stmt_seqp, stmt);
gimple_seq_add_seq (stmt_seqp, sub_seq);
stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
0);
gimple_seq_add_stmt (stmt_seqp, stmt);
}
/* Generate code to implement the COPYPRIVATE clauses. */
static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
omp_context *ctx)
{
tree c;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
tree var, new_var, ref, x;
bool by_ref;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
continue;
var = OMP_CLAUSE_DECL (c);
by_ref = use_pointer_for_field (var, NULL);
ref = build_sender_ref (var, ctx);
x = new_var = lookup_decl_in_outer_ctx (var, ctx);
if (by_ref)
{
x = build_fold_addr_expr_loc (clause_loc, new_var);
x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
}
gimplify_assign (ref, x, slist);
ref = build_receiver_ref (var, false, ctx);
if (by_ref)
{
ref = fold_convert_loc (clause_loc,
build_pointer_type (TREE_TYPE (new_var)),
ref);
ref = build_fold_indirect_ref_loc (clause_loc, ref);
}
if (omp_is_reference (var))
{
ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
ref = build_simple_mem_ref_loc (clause_loc, ref);
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
}
x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
gimplify_and_add (x, rlist);
}
}
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
and REDUCTION from the sender (aka parent) side. */
static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
omp_context *ctx)
{
tree c, t;
int ignored_looptemp = 0;
bool is_taskloop = false;
/* For taskloop, ignore the first two _looptemp_ clauses; those are
initialized by GOMP_taskloop. */
if (is_task_ctx (ctx) && gimple_omp_task_taskloop_p (ctx->stmt))
{
ignored_looptemp = 2;
is_taskloop = true;
}
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
tree val, ref, x, var;
bool by_ref, do_in = false, do_out = false;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
break;
continue;
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_REDUCTION:
break;
case OMP_CLAUSE_SHARED:
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
break;
continue;
case OMP_CLAUSE__LOOPTEMP_:
if (ignored_looptemp)
{
ignored_looptemp--;
continue;
}
break;
default:
continue;
}
val = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& TREE_CODE (val) == MEM_REF)
{
val = TREE_OPERAND (val, 0);
if (TREE_CODE (val) == POINTER_PLUS_EXPR)
val = TREE_OPERAND (val, 0);
if (TREE_CODE (val) == INDIRECT_REF
|| TREE_CODE (val) == ADDR_EXPR)
val = TREE_OPERAND (val, 0);
if (is_variable_sized (val))
continue;
}
/* For OMP_CLAUSE_SHARED_FIRSTPRIVATE, look beyond the
outer taskloop region. */
omp_context *ctx_for_o = ctx;
if (is_taskloop
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
&& OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
ctx_for_o = ctx->outer;
var = lookup_decl_in_outer_ctx (val, ctx_for_o);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
&& is_global_var (var))
continue;
t = omp_member_access_dummy_var (var);
if (t)
{
var = DECL_VALUE_EXPR (var);
tree o = maybe_lookup_decl_in_outer_ctx (t, ctx_for_o);
if (o != t)
var = unshare_and_remap (var, t, o);
else
var = unshare_expr (var);
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
{
/* Handle taskloop firstprivate/lastprivate, where the
lastprivate on GIMPLE_OMP_TASK is represented as
OMP_CLAUSE_SHARED_FIRSTPRIVATE. */
tree f = lookup_sfield ((splay_tree_key) &DECL_UID (val), ctx);
x = omp_build_component_ref (ctx->sender_decl, f);
if (use_pointer_for_field (val, ctx))
var = build_fold_addr_expr (var);
gimplify_assign (x, var, ilist);
DECL_ABSTRACT_ORIGIN (f) = NULL;
continue;
}
if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
|| val == OMP_CLAUSE_DECL (c))
&& is_variable_sized (val))
continue;
by_ref = use_pointer_for_field (val, NULL);
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_FIRSTPRIVATE:
if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c)
&& !by_ref
&& is_task_ctx (ctx))
TREE_NO_WARNING (var) = 1;
do_in = true;
break;
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE__LOOPTEMP_:
do_in = true;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (by_ref || omp_is_reference (val))
{
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
continue;
do_in = true;
}
else
{
do_out = true;
if (lang_hooks.decls.omp_private_outer_ref (val))
do_in = true;
}
break;
case OMP_CLAUSE_REDUCTION:
do_in = true;
if (val == OMP_CLAUSE_DECL (c))
do_out = !(by_ref || omp_is_reference (val));
else
by_ref = TREE_CODE (TREE_TYPE (val)) == ARRAY_TYPE;
break;
default:
gcc_unreachable ();
}
if (do_in)
{
ref = build_sender_ref (val, ctx);
x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
gimplify_assign (ref, x, ilist);
if (is_task_ctx (ctx))
DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
}
if (do_out)
{
ref = build_sender_ref (val, ctx);
gimplify_assign (var, ref, olist);
}
}
}
/* Generate code to implement SHARED from the sender (aka parent)
side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
list things that got automatically shared. */
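/* Sketch of the sender-side copies built below (names illustrative):
.omp_data_o.x = x; // by value
.omp_data_o.y = &y; // when passed by reference
and, in OLIST after the region, copying back what may have changed:
x = .omp_data_o.x; */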
static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
tree var, ovar, nvar, t, f, x, record_type;
if (ctx->record_type == NULL)
return;
record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
{
ovar = DECL_ABSTRACT_ORIGIN (f);
if (!ovar || TREE_CODE (ovar) == FIELD_DECL)
continue;
nvar = maybe_lookup_decl (ovar, ctx);
if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
continue;
/* If CTX is a nested parallel directive, find the immediately
enclosing parallel or workshare construct that contains a
mapping for OVAR. */
var = lookup_decl_in_outer_ctx (ovar, ctx);
t = omp_member_access_dummy_var (var);
if (t)
{
var = DECL_VALUE_EXPR (var);
tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
if (o != t)
var = unshare_and_remap (var, t, o);
else
var = unshare_expr (var);
}
if (use_pointer_for_field (ovar, ctx))
{
x = build_sender_ref (ovar, ctx);
var = build_fold_addr_expr (var);
gimplify_assign (x, var, ilist);
}
else
{
x = build_sender_ref (ovar, ctx);
gimplify_assign (x, var, ilist);
if (!TREE_READONLY (var)
/* We don't need to receive a new reference to a result
or parm decl. In fact we may not store to it as we will
invalidate any pending RSO and generate wrong gimple
during inlining. */
&& !((TREE_CODE (var) == RESULT_DECL
|| TREE_CODE (var) == PARM_DECL)
&& DECL_BY_REFERENCE (var)))
{
x = build_sender_ref (ovar, ctx);
gimplify_assign (var, x, olist);
}
}
}
}
/* Emit an OpenACC head marker call, encapsulating the partitioning and
other information that must be processed by the target compiler.
Return the maximum number of dimensions the associated loop might
be partitioned over. */
static unsigned
lower_oacc_head_mark (location_t loc, tree ddvar, tree clauses,
gimple_seq *seq, omp_context *ctx)
{
unsigned levels = 0;
unsigned tag = 0;
tree gang_static = NULL_TREE;
auto_vec<tree, 5> args;
args.quick_push (build_int_cst
(integer_type_node, IFN_UNIQUE_OACC_HEAD_MARK));
args.quick_push (ddvar);
for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_GANG:
tag |= OLF_DIM_GANG;
gang_static = OMP_CLAUSE_GANG_STATIC_EXPR (c);
/* static:* is represented by -1, and we can ignore it, as
scheduling is always static. */
if (gang_static && integer_minus_onep (gang_static))
gang_static = NULL_TREE;
levels++;
break;
case OMP_CLAUSE_WORKER:
tag |= OLF_DIM_WORKER;
levels++;
break;
case OMP_CLAUSE_VECTOR:
tag |= OLF_DIM_VECTOR;
levels++;
break;
case OMP_CLAUSE_SEQ:
tag |= OLF_SEQ;
break;
case OMP_CLAUSE_AUTO:
tag |= OLF_AUTO;
break;
case OMP_CLAUSE_INDEPENDENT:
tag |= OLF_INDEPENDENT;
break;
case OMP_CLAUSE_TILE:
tag |= OLF_TILE;
break;
default:
continue;
}
}
if (gang_static)
{
if (DECL_P (gang_static))
gang_static = build_outer_var_ref (gang_static, ctx);
tag |= OLF_GANG_STATIC;
}
/* In a parallel region, loops are implicitly INDEPENDENT. */
omp_context *tgt = enclosing_target_ctx (ctx);
if (!tgt || is_oacc_parallel (tgt))
tag |= OLF_INDEPENDENT;
if (tag & OLF_TILE)
/* Tiling could use all 3 levels. */
levels = 3;
else
{
/* A loop lacking SEQ, GANG, WORKER and/or VECTOR could be AUTO.
Ensure at least one level, or 2 for possible auto
partitioning. */
bool maybe_auto = !(tag & (((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1)
<< OLF_DIM_BASE) | OLF_SEQ));
if (levels < 1u + maybe_auto)
levels = 1u + maybe_auto;
}
args.quick_push (build_int_cst (integer_type_node, levels));
args.quick_push (build_int_cst (integer_type_node, tag));
if (gang_static)
args.quick_push (gang_static);
gcall *call = gimple_build_call_internal_vec (IFN_UNIQUE, args);
gimple_set_location (call, loc);
gimple_set_lhs (call, ddvar);
gimple_seq_add_stmt (seq, call);
return levels;
}
/* Emit an OpenACC loop head or tail marker to SEQ. TOFOLLOW, when
non-NULL, gives the partitioning level of the enclosed region. */
static void
lower_oacc_loop_marker (location_t loc, tree ddvar, bool head,
tree tofollow, gimple_seq *seq)
{
int marker_kind = (head ? IFN_UNIQUE_OACC_HEAD_MARK
: IFN_UNIQUE_OACC_TAIL_MARK);
tree marker = build_int_cst (integer_type_node, marker_kind);
int nargs = 2 + (tofollow != NULL_TREE);
gcall *call = gimple_build_call_internal (IFN_UNIQUE, nargs,
marker, ddvar, tofollow);
gimple_set_location (call, loc);
gimple_set_lhs (call, ddvar);
gimple_seq_add_stmt (seq, call);
}
/* Generate the before and after OpenACC loop sequences. CLAUSES are
the loop clauses, from which we extract reductions. Initialize
HEAD and TAIL. */
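/* Sketch of the result for a loop partitioned over two levels
(illustrative):
HEAD: head-mark setup fork init head-mark setup fork init
head-mark
TAIL: tail-mark fini join teardown tail-mark fini join
teardown tail-mark
where the setup/init/fini/teardown pieces are supplied by
lower_oacc_reductions. */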
static void
lower_oacc_head_tail (location_t loc, tree clauses,
gimple_seq *head, gimple_seq *tail, omp_context *ctx)
{
bool inner = false;
tree ddvar = create_tmp_var (integer_type_node, ".data_dep");
gimple_seq_add_stmt (head, gimple_build_assign (ddvar, integer_zero_node));
unsigned count = lower_oacc_head_mark (loc, ddvar, clauses, head, ctx);
tree fork_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_FORK);
tree join_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_JOIN);
gcc_assert (count);
for (unsigned done = 1; count; count--, done++)
{
gimple_seq fork_seq = NULL;
gimple_seq join_seq = NULL;
tree place = build_int_cst (integer_type_node, -1);
gcall *fork = gimple_build_call_internal (IFN_UNIQUE, 3,
fork_kind, ddvar, place);
gimple_set_location (fork, loc);
gimple_set_lhs (fork, ddvar);
gcall *join = gimple_build_call_internal (IFN_UNIQUE, 3,
join_kind, ddvar, place);
gimple_set_location (join, loc);
gimple_set_lhs (join, ddvar);
/* Mark the beginning of this level sequence. */
if (inner)
lower_oacc_loop_marker (loc, ddvar, true,
build_int_cst (integer_type_node, count),
&fork_seq);
lower_oacc_loop_marker (loc, ddvar, false,
build_int_cst (integer_type_node, done),
&join_seq);
lower_oacc_reductions (loc, clauses, place, inner,
fork, join, &fork_seq, &join_seq, ctx);
/* Append this level to head. */
gimple_seq_add_seq (head, fork_seq);
/* Prepend it to tail. */
gimple_seq_add_seq (&join_seq, *tail);
*tail = join_seq;
inner = true;
}
/* Mark the end of the sequence. */
lower_oacc_loop_marker (loc, ddvar, true, NULL_TREE, head);
lower_oacc_loop_marker (loc, ddvar, false, NULL_TREE, tail);
}
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
catch handler and return it. This prevents programs from violating the
structured block semantics with throws. */
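/* Sketch of the wrapper (illustrative):
try
{ <body> }
catch
{ <MUST_NOT_THROW: eh_protect_cleanup_actions (),
or __builtin_trap if the language has none> } */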
static gimple_seq
maybe_catch_exception (gimple_seq body)
{
gimple *g;
tree decl;
if (!flag_exceptions)
return body;
if (lang_hooks.eh_protect_cleanup_actions != NULL)
decl = lang_hooks.eh_protect_cleanup_actions ();
else
decl = builtin_decl_explicit (BUILT_IN_TRAP);
g = gimple_build_eh_must_not_throw (decl);
g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
GIMPLE_TRY_CATCH);
return gimple_seq_alloc_with_stmt (g);
}
/* Routines to lower OMP directives into OMP-GIMPLE. */
/* If CTX is a worksharing context inside of a cancellable parallel
region and it isn't nowait, add LHS to its GIMPLE_OMP_RETURN
and a conditional branch to the parallel's cancel_label to handle
cancellation in the implicit barrier. */
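/* Sketch of the emitted tail (names illustrative):
lhs = GIMPLE_OMP_RETURN; // barrier reports cancellation in LHS
if (lhs != 0) goto <parallel's cancel_label>;
fallthru_label:; */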
static void
maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
{
gimple *omp_return = gimple_seq_last_stmt (*body);
gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
if (gimple_omp_return_nowait_p (omp_return))
return;
if (ctx->outer
&& gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
&& ctx->outer->cancellable)
{
tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
tree lhs = create_tmp_var (c_bool_type);
gimple_omp_return_set_lhs (omp_return, lhs);
tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
gimple *g = gimple_build_cond (NE_EXPR, lhs,
fold_convert (c_bool_type,
boolean_false_node),
ctx->outer->cancel_label, fallthru_label);
gimple_seq_add_stmt (body, g);
gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
}
}
/* Lower the OpenMP sections directive in the current statement in GSI_P.
CTX is the enclosing OMP context for the current statement. */
static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block, control;
gimple_stmt_iterator tgsi;
gomp_sections *stmt;
gimple *t;
gbind *new_stmt, *bind;
gimple_seq ilist, dlist, olist, new_body;
stmt = as_a <gomp_sections *> (gsi_stmt (*gsi_p));
push_gimplify_context ();
dlist = NULL;
ilist = NULL;
lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
&ilist, &dlist, ctx, NULL);
new_body = gimple_omp_body (stmt);
gimple_omp_set_body (stmt, NULL);
tgsi = gsi_start (new_body);
for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
{
omp_context *sctx;
gimple *sec_start;
sec_start = gsi_stmt (tgsi);
sctx = maybe_lookup_ctx (sec_start);
gcc_assert (sctx);
lower_omp (gimple_omp_body_ptr (sec_start), sctx);
gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
GSI_CONTINUE_LINKING);
gimple_omp_set_body (sec_start, NULL);
if (gsi_one_before_end_p (tgsi))
{
gimple_seq l = NULL;
lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
&l, ctx);
gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
gimple_omp_section_set_last (sec_start);
}
gsi_insert_after (&tgsi, gimple_build_omp_return (false),
GSI_CONTINUE_LINKING);
}
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, new_body, block);
olist = NULL;
lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
block = make_node (BLOCK);
new_stmt = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, new_stmt, true);
pop_gimplify_context (new_stmt);
gimple_bind_append_vars (new_stmt, ctx->block_vars);
BLOCK_VARS (block) = gimple_bind_vars (bind);
if (BLOCK_VARS (block))
TREE_USED (block) = 1;
new_body = NULL;
gimple_seq_add_seq (&new_body, ilist);
gimple_seq_add_stmt (&new_body, stmt);
gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
gimple_seq_add_stmt (&new_body, bind);
control = create_tmp_var (unsigned_type_node, ".section");
t = gimple_build_omp_continue (control, control);
gimple_omp_sections_set_control (stmt, control);
gimple_seq_add_stmt (&new_body, t);
gimple_seq_add_seq (&new_body, olist);
if (ctx->cancellable)
gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
gimple_seq_add_seq (&new_body, dlist);
new_body = maybe_catch_exception (new_body);
bool nowait = omp_find_clause (gimple_omp_sections_clauses (stmt),
OMP_CLAUSE_NOWAIT) != NULL_TREE;
t = gimple_build_omp_return (nowait);
gimple_seq_add_stmt (&new_body, t);
maybe_add_implicit_barrier_cancel (ctx, &new_body);
gimple_bind_set_body (new_stmt, new_body);
}
/* A subroutine of lower_omp_single. Expand the simple form of
a GIMPLE_OMP_SINGLE, without a copyprivate clause:
if (GOMP_single_start ())
BODY;
[ GOMP_barrier (); ] -> unless 'nowait' is present.
FIXME. It may be better to delay expanding the logic of this until
pass_expand_omp. The expanded logic may make the job more difficult
for a synchronization analysis pass. */
static void
lower_omp_single_simple (gomp_single *single_stmt, gimple_seq *pre_p)
{
location_t loc = gimple_location (single_stmt);
tree tlabel = create_artificial_label (loc);
tree flabel = create_artificial_label (loc);
gimple *call, *cond;
tree lhs, decl;
decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)));
call = gimple_build_call (decl, 0);
gimple_call_set_lhs (call, lhs);
gimple_seq_add_stmt (pre_p, call);
cond = gimple_build_cond (EQ_EXPR, lhs,
fold_convert_loc (loc, TREE_TYPE (lhs),
boolean_true_node),
tlabel, flabel);
gimple_seq_add_stmt (pre_p, cond);
gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}
/* A subroutine of lower_omp_single. Expand the simple form of
a GIMPLE_OMP_SINGLE, with a copyprivate clause:
#pragma omp single copyprivate (a, b, c)
Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
{
if ((copyout_p = GOMP_single_copy_start ()) == NULL)
{
BODY;
copyout.a = a;
copyout.b = b;
copyout.c = c;
GOMP_single_copy_end (&copyout);
}
else
{
a = copyout_p->a;
b = copyout_p->b;
c = copyout_p->c;
}
GOMP_barrier ();
}
FIXME. It may be better to delay expanding the logic of this until
pass_expand_omp. The expanded logic may make the job more difficult
for a synchronization analysis pass. */
static void
lower_omp_single_copy (gomp_single *single_stmt, gimple_seq *pre_p,
omp_context *ctx)
{
tree ptr_type, t, l0, l1, l2, bfn_decl;
gimple_seq copyin_seq;
location_t loc = gimple_location (single_stmt);
ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
ptr_type = build_pointer_type (ctx->record_type);
ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
l0 = create_artificial_label (loc);
l1 = create_artificial_label (loc);
l2 = create_artificial_label (loc);
bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
t = build_call_expr_loc (loc, bfn_decl, 0);
t = fold_convert_loc (loc, ptr_type, t);
gimplify_assign (ctx->receiver_decl, t, pre_p);
t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
build_int_cst (ptr_type, 0));
t = build3 (COND_EXPR, void_type_node, t,
build_and_jump (&l0), build_and_jump (&l1));
gimplify_and_add (t, pre_p);
gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
copyin_seq = NULL;
lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
&copyin_seq, ctx);
t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
t = build_call_expr_loc (loc, bfn_decl, 1, t);
gimplify_and_add (t, pre_p);
t = build_and_jump (&l2);
gimplify_and_add (t, pre_p);
gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
gimple_seq_add_seq (pre_p, copyin_seq);
gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}
/* Expand code for an OpenMP single directive. */
static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block;
gomp_single *single_stmt = as_a <gomp_single *> (gsi_stmt (*gsi_p));
gbind *bind;
gimple_seq bind_body, bind_body_tail = NULL, dlist;
push_gimplify_context ();
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
bind_body = NULL;
dlist = NULL;
lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
&bind_body, &dlist, ctx, NULL);
lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
gimple_seq_add_stmt (&bind_body, single_stmt);
if (ctx->record_type)
lower_omp_single_copy (single_stmt, &bind_body, ctx);
else
lower_omp_single_simple (single_stmt, &bind_body);
gimple_omp_set_body (single_stmt, NULL);
gimple_seq_add_seq (&bind_body, dlist);
bind_body = maybe_catch_exception (bind_body);
bool nowait = omp_find_clause (gimple_omp_single_clauses (single_stmt),
OMP_CLAUSE_NOWAIT) != NULL_TREE;
gimple *g = gimple_build_omp_return (nowait);
gimple_seq_add_stmt (&bind_body_tail, g);
maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
if (ctx->record_type)
{
gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
tree clobber = build_constructor (ctx->record_type, NULL);
TREE_THIS_VOLATILE (clobber) = 1;
gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
clobber), GSI_SAME_STMT);
}
gimple_seq_add_seq (&bind_body, bind_body_tail);
gimple_bind_set_body (bind, bind_body);
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
if (BLOCK_VARS (block))
TREE_USED (block) = 1;
}
/* Expand code for an OpenMP master directive. */
static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block, lab = NULL, x, bfn_decl;
gimple *stmt = gsi_stmt (*gsi_p);
gbind *bind;
location_t loc = gimple_location (stmt);
gimple_seq tseq;
push_gimplify_context ();
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
x = build_call_expr_loc (loc, bfn_decl, 0);
x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
tseq = NULL;
gimplify_and_add (x, &tseq);
gimple_bind_add_seq (bind, tseq);
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
gimple_bind_add_stmt (bind, gimple_build_label (lab));
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
}
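/* Illustrative sketch: master is lowered without a dedicated start/end
   library call; the body is simply guarded by a thread-number test,
   roughly

       if (omp_get_thread_num () == 0)
	 <body>;
       lab: ;

   and the OMP_RETURN is emitted with nowait set, as master implies
   no barrier.  */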
/* Expand code for an OpenMP taskgroup directive. */
static void
lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
gimple *stmt = gsi_stmt (*gsi_p);
gcall *x;
gbind *bind;
tree block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
0);
gimple_bind_add_stmt (bind, x);
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
}
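/* Sketch of the shape built here:

       GOMP_taskgroup_start ();
       <body>;
       GIMPLE_OMP_RETURN;

   Note that only the GOMP_taskgroup_start call is emitted by this
   function; the matching GOMP_taskgroup_end, which waits for all
   descendant tasks, is materialized later in the pipeline.  */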
/* Fold the OMP_ORDERED_CLAUSES for the OMP_ORDERED in STMT if possible. */
static void
lower_omp_ordered_clauses (gimple_stmt_iterator *gsi_p, gomp_ordered *ord_stmt,
omp_context *ctx)
{
struct omp_for_data fd;
if (!ctx->outer || gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR)
return;
unsigned int len = gimple_omp_for_collapse (ctx->outer->stmt);
struct omp_for_data_loop *loops = XALLOCAVEC (struct omp_for_data_loop, len);
omp_extract_for_data (as_a <gomp_for *> (ctx->outer->stmt), &fd, loops);
if (!fd.ordered)
return;
tree *list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
tree c = gimple_omp_ordered_clauses (ord_stmt);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
{
/* Merge depend clauses from multiple adjacent
#pragma omp ordered depend(sink:...) constructs
into one #pragma omp ordered depend(sink:...), so that
we can optimize them together. */
gimple_stmt_iterator gsi = *gsi_p;
gsi_next (&gsi);
while (!gsi_end_p (gsi))
{
gimple *stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt)
|| gimple_code (stmt) == GIMPLE_NOP)
{
gsi_next (&gsi);
continue;
}
if (gimple_code (stmt) != GIMPLE_OMP_ORDERED)
break;
gomp_ordered *ord_stmt2 = as_a <gomp_ordered *> (stmt);
c = gimple_omp_ordered_clauses (ord_stmt2);
if (c == NULL_TREE
|| OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
|| OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
break;
while (*list_p)
list_p = &OMP_CLAUSE_CHAIN (*list_p);
*list_p = c;
gsi_remove (&gsi, true);
}
}
/* Canonicalize sink dependence clauses into one folded clause if
possible.
The basic algorithm is to create a sink vector whose first
element is the GCD of all the first elements, and whose remaining
elements are the minimum of the subsequent columns.
We ignore dependence vectors whose first element is zero because
such dependencies are known to be executed by the same thread.
We take into account the direction of the loop, so a minimum
becomes a maximum if the loop is iterating forwards. We also
ignore sink clauses where the loop direction is unknown, or where
the offsets are clearly invalid because they are not a multiple
of the loop increment.
For example:
#pragma omp for ordered(2)
for (i=0; i < N; ++i)
for (j=0; j < M; ++j)
{
#pragma omp ordered \
depend(sink:i-8,j-2) \
depend(sink:i,j-1) \ // Completely ignored because i+0.
depend(sink:i-4,j-3) \
depend(sink:i-6,j-4)
#pragma omp ordered depend(source)
}
Folded clause is:
depend(sink:-gcd(8,4,6),-min(2,3,4))
-or-
depend(sink:-2,-2)
*/
/* FIXME: Computing GCDs where the first element is zero is
non-trivial in the presence of collapsed loops. Do this later. */
if (fd.collapse > 1)
return;
wide_int *folded_deps = XALLOCAVEC (wide_int, 2 * len - 1);
/* wide_int is not a POD so it must be default-constructed. */
for (unsigned i = 0; i != 2 * len - 1; ++i)
new (static_cast<void*>(folded_deps + i)) wide_int ();
tree folded_dep = NULL_TREE;
/* TRUE if the first dimension's offset is negative. */
bool neg_offset_p = false;
list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
unsigned int i;
while ((c = *list_p) != NULL)
{
bool remove = false;
gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND);
if (OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
goto next_ordered_clause;
tree vec;
for (vec = OMP_CLAUSE_DECL (c), i = 0;
vec && TREE_CODE (vec) == TREE_LIST;
vec = TREE_CHAIN (vec), ++i)
{
gcc_assert (i < len);
/* omp_extract_for_data has canonicalized the condition. */
gcc_assert (fd.loops[i].cond_code == LT_EXPR
|| fd.loops[i].cond_code == GT_EXPR);
bool forward = fd.loops[i].cond_code == LT_EXPR;
bool maybe_lexically_later = true;
/* While the committee makes up its mind, bail if we have any
non-constant steps. */
if (TREE_CODE (fd.loops[i].step) != INTEGER_CST)
goto lower_omp_ordered_ret;
tree itype = TREE_TYPE (TREE_VALUE (vec));
if (POINTER_TYPE_P (itype))
itype = sizetype;
wide_int offset = wide_int::from (wi::to_wide (TREE_PURPOSE (vec)),
TYPE_PRECISION (itype),
TYPE_SIGN (itype));
/* Ignore invalid offsets that are not multiples of the step. */
if (!wi::multiple_of_p (wi::abs (offset),
wi::abs (wi::to_wide (fd.loops[i].step)),
UNSIGNED))
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"ignoring sink clause with offset that is not "
"a multiple of the loop step");
remove = true;
goto next_ordered_clause;
}
/* Calculate the first dimension. The first dimension of
the folded dependency vector is the GCD of the first
elements, while ignoring any first elements whose offset
is 0. */
if (i == 0)
{
/* Ignore dependence vectors whose first dimension is 0. */
if (offset == 0)
{
remove = true;
goto next_ordered_clause;
}
else
{
if (!TYPE_UNSIGNED (itype) && (forward ^ wi::neg_p (offset)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"first offset must be in opposite direction "
"of loop iterations");
goto lower_omp_ordered_ret;
}
if (forward)
offset = -offset;
neg_offset_p = forward;
/* Initialize the first time around. */
if (folded_dep == NULL_TREE)
{
folded_dep = c;
folded_deps[0] = offset;
}
else
folded_deps[0] = wi::gcd (folded_deps[0],
offset, UNSIGNED);
}
}
/* Calculate minimum for the remaining dimensions. */
else
{
folded_deps[len + i - 1] = offset;
if (folded_dep == c)
folded_deps[i] = offset;
else if (maybe_lexically_later
&& !wi::eq_p (folded_deps[i], offset))
{
if (forward ^ wi::gts_p (folded_deps[i], offset))
{
unsigned int j;
folded_dep = c;
for (j = 1; j <= i; j++)
folded_deps[j] = folded_deps[len + j - 1];
}
else
maybe_lexically_later = false;
}
}
}
gcc_assert (i == len);
remove = true;
next_ordered_clause:
if (remove)
*list_p = OMP_CLAUSE_CHAIN (c);
else
list_p = &OMP_CLAUSE_CHAIN (c);
}
if (folded_dep)
{
if (neg_offset_p)
folded_deps[0] = -folded_deps[0];
tree itype = TREE_TYPE (TREE_VALUE (OMP_CLAUSE_DECL (folded_dep)));
if (POINTER_TYPE_P (itype))
itype = sizetype;
TREE_PURPOSE (OMP_CLAUSE_DECL (folded_dep))
= wide_int_to_tree (itype, folded_deps[0]);
OMP_CLAUSE_CHAIN (folded_dep) = gimple_omp_ordered_clauses (ord_stmt);
*gimple_omp_ordered_clauses_ptr (ord_stmt) = folded_dep;
}
lower_omp_ordered_ret:
/* Ordered without clauses is the same as #pragma omp ordered threads,
while we want a nop instead if we remove all clauses.  */
if (gimple_omp_ordered_clauses (ord_stmt) == NULL_TREE)
gsi_replace (gsi_p, gimple_build_nop (), true);
}
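/* Worked instance of the folding above, matching the example in the
   big comment (a sketch; the real code works on wide_int):

       offsets   (-8, -2), (-4, -3), (-6, -4)	// (i, j) deltas
       dim 0:    -gcd (8, 4, 6)  = -2
       dim 1:    -min (2, 3, 4)  = -2
       folded:   depend(sink: i-2, j-2)

   The (i+0, j-1) vector was dropped up front, since a zero first
   offset means the dependence is satisfied within one thread.  */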
/* Expand code for an OpenMP ordered directive. */
static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block;
gimple *stmt = gsi_stmt (*gsi_p), *g;
gomp_ordered *ord_stmt = as_a <gomp_ordered *> (stmt);
gcall *x;
gbind *bind;
bool simd = omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
OMP_CLAUSE_SIMD);
/* FIXME: this should check presence of OMP_CLAUSE__SIMT_ on the enclosing
loop. */
bool maybe_simt
= simd && omp_maybe_offloaded_ctx (ctx) && omp_max_simt_vf () > 1;
bool threads = omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
OMP_CLAUSE_THREADS);
if (omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
OMP_CLAUSE_DEPEND))
{
/* FIXME: This needs to be moved to the expansion, to verify various
conditions only testable on a CFG with dominators computed, and also
all the depend clauses to be merged still might need to be available
for the runtime checks. */
if (0)
lower_omp_ordered_clauses (gsi_p, ord_stmt, ctx);
return;
}
push_gimplify_context ();
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
if (simd)
{
x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_START, 1,
build_int_cst (NULL_TREE, threads));
cfun->has_simduid_loops = true;
}
else
x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
0);
gimple_bind_add_stmt (bind, x);
tree counter = NULL_TREE, test = NULL_TREE, body = NULL_TREE;
if (maybe_simt)
{
counter = create_tmp_var (integer_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMT_LANE, 0);
gimple_call_set_lhs (g, counter);
gimple_bind_add_stmt (bind, g);
body = create_artificial_label (UNKNOWN_LOCATION);
test = create_artificial_label (UNKNOWN_LOCATION);
gimple_bind_add_stmt (bind, gimple_build_label (body));
tree simt_pred = create_tmp_var (integer_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMT_ORDERED_PRED, 1, counter);
gimple_call_set_lhs (g, simt_pred);
gimple_bind_add_stmt (bind, g);
tree t = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (EQ_EXPR, simt_pred, integer_zero_node, t, test);
gimple_bind_add_stmt (bind, g);
gimple_bind_add_stmt (bind, gimple_build_label (t));
}
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
if (maybe_simt)
{
gimple_bind_add_stmt (bind, gimple_build_label (test));
g = gimple_build_assign (counter, MINUS_EXPR, counter, integer_one_node);
gimple_bind_add_stmt (bind, g);
tree c = build2 (GE_EXPR, boolean_type_node, counter, integer_zero_node);
tree nonneg = create_tmp_var (integer_type_node);
gimple_seq tseq = NULL;
gimplify_assign (nonneg, fold_convert (integer_type_node, c), &tseq);
gimple_bind_add_seq (bind, tseq);
g = gimple_build_call_internal (IFN_GOMP_SIMT_VOTE_ANY, 1, nonneg);
gimple_call_set_lhs (g, nonneg);
gimple_bind_add_stmt (bind, g);
tree end = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, nonneg, integer_zero_node, body, end);
gimple_bind_add_stmt (bind, g);
gimple_bind_add_stmt (bind, gimple_build_label (end));
}
if (simd)
x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_END, 1,
build_int_cst (NULL_TREE, threads));
else
x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END),
0);
gimple_bind_add_stmt (bind, x);
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = gimple_bind_vars (bind);
}
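/* A rough sketch of the non-SIMT result:

       GOMP_ordered_start ();		// or IFN_GOMP_SIMD_ORDERED_START
       <body>;
       GOMP_ordered_end ();		// or IFN_GOMP_SIMD_ORDERED_END
       GIMPLE_OMP_RETURN;

   In the maybe_simt case the body is additionally wrapped in a loop
   that lets each SIMT lane take its turn, using GOMP_SIMT_LANE,
   GOMP_SIMT_ORDERED_PRED and GOMP_SIMT_VOTE_ANY as built above.  */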
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
substitution of a couple of function calls.  But in the NAMED case, it
requires that languages coordinate a symbol name.  It is therefore
best put here in common code.  */
static GTY(()) hash_map<tree, tree> *critical_name_mutexes;
static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block;
tree name, lock, unlock;
gomp_critical *stmt = as_a <gomp_critical *> (gsi_stmt (*gsi_p));
gbind *bind;
location_t loc = gimple_location (stmt);
gimple_seq tbody;
name = gimple_omp_critical_name (stmt);
if (name)
{
tree decl;
if (!critical_name_mutexes)
critical_name_mutexes = hash_map<tree, tree>::create_ggc (10);
tree *n = critical_name_mutexes->get (name);
if (n == NULL)
{
char *new_str;
decl = create_tmp_var_raw (ptr_type_node);
new_str = ACONCAT ((".gomp_critical_user_",
IDENTIFIER_POINTER (name), NULL));
DECL_NAME (decl) = get_identifier (new_str);
TREE_PUBLIC (decl) = 1;
TREE_STATIC (decl) = 1;
DECL_COMMON (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 1;
varpool_node::finalize_decl (decl);
critical_name_mutexes->put (name, decl);
}
else
decl = *n;
/* If '#pragma omp critical' is inside offloaded region or
inside function marked as offloadable, the symbol must be
marked as offloadable too. */
omp_context *octx;
if (cgraph_node::get (current_function_decl)->offloadable)
varpool_node::get_create (decl)->offloadable = 1;
else
for (octx = ctx->outer; octx; octx = octx->outer)
if (is_gimple_omp_offloaded (octx->stmt))
{
varpool_node::get_create (decl)->offloadable = 1;
break;
}
lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
lock = build_call_expr_loc (loc, lock, 1,
build_fold_addr_expr_loc (loc, decl));
unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
unlock = build_call_expr_loc (loc, unlock, 1,
build_fold_addr_expr_loc (loc, decl));
}
else
{
lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
lock = build_call_expr_loc (loc, lock, 0);
unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
unlock = build_call_expr_loc (loc, unlock, 0);
}
push_gimplify_context ();
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
tbody = gimple_bind_body (bind);
gimplify_and_add (lock, &tbody);
gimple_bind_set_body (bind, tbody);
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
tbody = gimple_bind_body (bind);
gimplify_and_add (unlock, &tbody);
gimple_bind_set_body (bind, tbody);
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = gimple_bind_vars (bind);
}
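/* Sketch of both shapes produced here:

       // unnamed:			// named (foo):
       GOMP_critical_start ();		GOMP_critical_name_start (&m);
       <body>;				<body>;
       GOMP_critical_end ();		GOMP_critical_name_end (&m);

   where m is the lazily created, TREE_PUBLIC/DECL_COMMON variable
   .gomp_critical_user_foo, so all translation units naming the same
   critical section share one mutex.  */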
/* A subroutine of lower_omp_for. Generate code to emit the predicate
for a lastprivate clause. Given a loop control predicate of (V
cond N2), we gate the clause on (!(V cond N2)). The lowered form
is appended to *DLIST, iterator initialization is appended to
*BODY_P. */
static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
gimple_seq *dlist, struct omp_context *ctx)
{
tree clauses, cond, vinit;
enum tree_code cond_code;
gimple_seq stmts;
cond_code = fd->loop.cond_code;
cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
/* When possible, use a strict equality expression. This can let VRP
type optimizations deduce the value and remove a copy. */
if (tree_fits_shwi_p (fd->loop.step))
{
HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
if (step == 1 || step == -1)
cond_code = EQ_EXPR;
}
if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_GRID_LOOP
|| gimple_omp_for_grid_phony (fd->for_stmt))
cond = omp_grid_lastprivate_predicate (fd);
else
{
tree n2 = fd->loop.n2;
if (fd->collapse > 1
&& TREE_CODE (n2) != INTEGER_CST
&& gimple_omp_for_combined_into_p (fd->for_stmt))
{
struct omp_context *taskreg_ctx = NULL;
if (gimple_code (ctx->outer->stmt) == GIMPLE_OMP_FOR)
{
gomp_for *gfor = as_a <gomp_for *> (ctx->outer->stmt);
if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_FOR
|| gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_DISTRIBUTE)
{
if (gimple_omp_for_combined_into_p (gfor))
{
gcc_assert (ctx->outer->outer
&& is_parallel_ctx (ctx->outer->outer));
taskreg_ctx = ctx->outer->outer;
}
else
{
struct omp_for_data outer_fd;
omp_extract_for_data (gfor, &outer_fd, NULL);
n2 = fold_convert (TREE_TYPE (n2), outer_fd.loop.n2);
}
}
else if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_TASKLOOP)
taskreg_ctx = ctx->outer->outer;
}
else if (is_taskreg_ctx (ctx->outer))
taskreg_ctx = ctx->outer;
if (taskreg_ctx)
{
int i;
tree taskreg_clauses
= gimple_omp_taskreg_clauses (taskreg_ctx->stmt);
tree innerc = omp_find_clause (taskreg_clauses,
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
for (i = 0; i < fd->collapse; i++)
{
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
}
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
if (innerc)
n2 = fold_convert (TREE_TYPE (n2),
lookup_decl (OMP_CLAUSE_DECL (innerc),
taskreg_ctx));
}
}
cond = build2 (cond_code, boolean_type_node, fd->loop.v, n2);
}
clauses = gimple_omp_for_clauses (fd->for_stmt);
stmts = NULL;
lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
if (!gimple_seq_empty_p (stmts))
{
gimple_seq_add_seq (&stmts, *dlist);
*dlist = stmts;
/* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
vinit = fd->loop.n1;
if (cond_code == EQ_EXPR
&& tree_fits_shwi_p (fd->loop.n2)
&& ! integer_zerop (fd->loop.n2))
vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
else
vinit = unshare_expr (vinit);
/* Initialize the iterator variable, so that threads that don't execute
any iterations don't execute the lastprivate clauses by accident. */
gimplify_assign (fd->loop.v, vinit, body_p);
}
}
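/* A sketch, for a loop  for (V = N1; V < N2; V += STEP):

       V = vinit;			// added to *BODY_P, before the loop
       ...
       if (V >= N2)			// or V == N2 when |STEP| is 1
	 <lastprivate copy-out>;	// prepended to *DLIST

   so threads that executed no iterations do not satisfy the predicate
   and skip the copy-out.  */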
/* Lower code for an OMP loop directive. */
static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree *rhs_p, block;
struct omp_for_data fd, *fdp = NULL;
gomp_for *stmt = as_a <gomp_for *> (gsi_stmt (*gsi_p));
gbind *new_stmt;
gimple_seq omp_for_body, body, dlist;
gimple_seq oacc_head = NULL, oacc_tail = NULL;
size_t i;
push_gimplify_context ();
lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
block = make_node (BLOCK);
new_stmt = gimple_build_bind (NULL, NULL, block);
/* Replace at gsi right away, so that 'stmt' is no longer a member
of a sequence, as we're going to add to a different
one below. */
gsi_replace (gsi_p, new_stmt, true);
/* Move declaration of temporaries in the loop body before we make
it go away. */
omp_for_body = gimple_omp_body (stmt);
if (!gimple_seq_empty_p (omp_for_body)
&& gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
{
gbind *inner_bind
= as_a <gbind *> (gimple_seq_first_stmt (omp_for_body));
tree vars = gimple_bind_vars (inner_bind);
gimple_bind_append_vars (new_stmt, vars);
/* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
keep them on the inner_bind and its block. */
gimple_bind_set_vars (inner_bind, NULL_TREE);
if (gimple_bind_block (inner_bind))
BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
}
if (gimple_omp_for_combined_into_p (stmt))
{
omp_extract_for_data (stmt, &fd, NULL);
fdp = &fd;
/* We need two temporaries with fd.loop.v type (istart/iend)
and then (fd.collapse - 1) temporaries with the same
type for count2 ... countN-1 vars if not constant. */
size_t count = 2;
tree type = fd.iter_type;
if (fd.collapse > 1
&& TREE_CODE (fd.loop.n2) != INTEGER_CST)
count += fd.collapse - 1;
bool taskreg_for
= (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR
|| gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP);
tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
tree simtc = NULL;
tree clauses = *pc;
if (taskreg_for)
outerc
= omp_find_clause (gimple_omp_taskreg_clauses (ctx->outer->stmt),
OMP_CLAUSE__LOOPTEMP_);
if (ctx->simt_stmt)
simtc = omp_find_clause (gimple_omp_for_clauses (ctx->simt_stmt),
OMP_CLAUSE__LOOPTEMP_);
for (i = 0; i < count; i++)
{
tree temp;
if (taskreg_for)
{
gcc_assert (outerc);
temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
outerc = omp_find_clause (OMP_CLAUSE_CHAIN (outerc),
OMP_CLAUSE__LOOPTEMP_);
}
else
{
/* If there are 2 adjacent SIMD stmts, one with _simt_
clause, another without, make sure they have the same
decls in _looptemp_ clauses, because the outer stmt
they are combined into will look up just one inner_stmt. */
if (ctx->simt_stmt)
temp = OMP_CLAUSE_DECL (simtc);
else
temp = create_tmp_var (type);
insert_decl_map (&ctx->outer->cb, temp, temp);
}
*pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
OMP_CLAUSE_DECL (*pc) = temp;
pc = &OMP_CLAUSE_CHAIN (*pc);
if (ctx->simt_stmt)
simtc = omp_find_clause (OMP_CLAUSE_CHAIN (simtc),
OMP_CLAUSE__LOOPTEMP_);
}
*pc = clauses;
}
/* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
dlist = NULL;
body = NULL;
lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
fdp);
gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
lower_omp (gimple_omp_body_ptr (stmt), ctx);
/* Lower the header expressions. At this point, we can assume that
the header is of the form:
#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
We just need to make sure that VAL1, VAL2 and VAL3 are lowered
using the .omp_data_s mapping, if needed. */
for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
{
rhs_p = gimple_omp_for_initial_ptr (stmt, i);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, &body);
else if (TREE_CODE (*rhs_p) == ADDR_EXPR)
recompute_tree_invariant_for_addr_expr (*rhs_p);
rhs_p = gimple_omp_for_final_ptr (stmt, i);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, &body);
else if (TREE_CODE (*rhs_p) == ADDR_EXPR)
recompute_tree_invariant_for_addr_expr (*rhs_p);
rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
if (!is_gimple_min_invariant (*rhs_p))
*rhs_p = get_formal_tmp_var (*rhs_p, &body);
}
/* Once lowered, extract the bounds and clauses. */
omp_extract_for_data (stmt, &fd, NULL);
if (is_gimple_omp_oacc (ctx->stmt)
&& !ctx_in_oacc_kernels_region (ctx))
lower_oacc_head_tail (gimple_location (stmt),
gimple_omp_for_clauses (stmt),
&oacc_head, &oacc_tail, ctx);
/* Add OpenACC partitioning and reduction markers just before the loop. */
if (oacc_head)
gimple_seq_add_seq (&body, oacc_head);
lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
{
OMP_CLAUSE_DECL (c) = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
if (DECL_P (OMP_CLAUSE_LINEAR_STEP (c)))
OMP_CLAUSE_LINEAR_STEP (c)
= maybe_lookup_decl_in_outer_ctx (OMP_CLAUSE_LINEAR_STEP (c),
ctx);
}
bool phony_loop = (gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_GRID_LOOP
&& gimple_omp_for_grid_phony (stmt));
if (!phony_loop)
gimple_seq_add_stmt (&body, stmt);
gimple_seq_add_seq (&body, gimple_omp_body (stmt));
if (!phony_loop)
gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
fd.loop.v));
/* After the loop, add exit clauses. */
lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
if (ctx->cancellable)
gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
gimple_seq_add_seq (&body, dlist);
body = maybe_catch_exception (body);
if (!phony_loop)
{
/* Region exit marker goes at the end of the loop body. */
gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
maybe_add_implicit_barrier_cancel (ctx, &body);
}
/* Add OpenACC joining and reduction markers just after the loop. */
if (oacc_tail)
gimple_seq_add_seq (&body, oacc_tail);
pop_gimplify_context (new_stmt);
gimple_bind_append_vars (new_stmt, ctx->block_vars);
maybe_remove_omp_member_access_dummy_vars (new_stmt);
BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
if (BLOCK_VARS (block))
TREE_USED (block) = 1;
gimple_bind_set_body (new_stmt, body);
gimple_omp_set_body (stmt, NULL);
gimple_omp_for_set_pre_body (stmt, NULL);
}
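/* Overall shape assembled into BODY by this function (a sketch):

       <ilist: input clauses>  <pre-body>  <lowered header temps>
       [oacc head marker]
       <lastprivate vinit / predicate setup>
       GIMPLE_OMP_FOR <header>
	 <loop body>
       GIMPLE_OMP_CONTINUE (V, V)
       <reduction exit code>  [cancel label]  <dlist>
       GIMPLE_OMP_RETURN [nowait]  [barrier-cancel]  [oacc tail marker]

   The actual loop control flow is only generated at expansion time.  */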
/* Callback for walk_stmts. Check if the current statement only contains
GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
bool *handled_ops_p,
struct walk_stmt_info *wi)
{
int *info = (int *) wi->info;
gimple *stmt = gsi_stmt (*gsi_p);
*handled_ops_p = true;
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
case GIMPLE_DEBUG:
break;
case GIMPLE_OMP_FOR:
case GIMPLE_OMP_SECTIONS:
*info = *info == 0 ? 1 : -1;
break;
default:
*info = -1;
break;
}
return NULL;
}
struct omp_taskcopy_context
{
/* This field must be at the beginning, as we do "inheritance": Some
callback functions for tree-inline.c (e.g., omp_copy_decl)
receive a copy_body_data pointer that is up-casted to an
omp_context pointer. */
copy_body_data cb;
omp_context *ctx;
};
static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
return create_tmp_var (TREE_TYPE (var));
return var;
}
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
tree name, new_fields = NULL, type, f;
type = lang_hooks.types.make_type (RECORD_TYPE);
name = DECL_NAME (TYPE_NAME (orig_type));
name = build_decl (gimple_location (tcctx->ctx->stmt),
TYPE_DECL, name, type);
TYPE_NAME (type) = name;
for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
{
tree new_f = copy_node (f);
DECL_CONTEXT (new_f) = type;
TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
TREE_CHAIN (new_f) = new_fields;
walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
&tcctx->cb, NULL);
new_fields = new_f;
tcctx->cb.decl_map->put (f, new_f);
}
TYPE_FIELDS (type) = nreverse (new_fields);
layout_type (type);
return type;
}
/* Create task copyfn. */
static void
create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
{
struct function *child_cfun;
tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
tree record_type, srecord_type, bind, list;
bool record_needs_remap = false, srecord_needs_remap = false;
splay_tree_node n;
struct omp_taskcopy_context tcctx;
location_t loc = gimple_location (task_stmt);
size_t looptempno = 0;
child_fn = gimple_omp_task_copy_fn (task_stmt);
child_cfun = DECL_STRUCT_FUNCTION (child_fn);
gcc_assert (child_cfun->cfg == NULL);
DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
/* Reset DECL_CONTEXT on function arguments. */
for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
DECL_CONTEXT (t) = child_fn;
/* Populate the function. */
push_gimplify_context ();
push_cfun (child_cfun);
bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
list = NULL;
DECL_SAVED_TREE (child_fn) = bind;
DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
/* Remap src and dst argument types if needed. */
record_type = ctx->record_type;
srecord_type = ctx->srecord_type;
for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
{
record_needs_remap = true;
break;
}
for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
{
srecord_needs_remap = true;
break;
}
if (record_needs_remap || srecord_needs_remap)
{
memset (&tcctx, '\0', sizeof (tcctx));
tcctx.cb.src_fn = ctx->cb.src_fn;
tcctx.cb.dst_fn = child_fn;
tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
gcc_checking_assert (tcctx.cb.src_node);
tcctx.cb.dst_node = tcctx.cb.src_node;
tcctx.cb.src_cfun = ctx->cb.src_cfun;
tcctx.cb.copy_decl = task_copyfn_copy_decl;
tcctx.cb.eh_lp_nr = 0;
tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
tcctx.cb.decl_map = new hash_map<tree, tree>;
tcctx.ctx = ctx;
if (record_needs_remap)
record_type = task_copyfn_remap_type (&tcctx, record_type);
if (srecord_needs_remap)
srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
}
else
tcctx.cb.decl_map = NULL;
arg = DECL_ARGUMENTS (child_fn);
TREE_TYPE (arg) = build_pointer_type (record_type);
sarg = DECL_CHAIN (arg);
TREE_TYPE (sarg) = build_pointer_type (srecord_type);
/* First pass: initialize temporaries used in record_type and srecord_type
sizes and field offsets. */
if (tcctx.cb.decl_map)
for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
tree *p;
decl = OMP_CLAUSE_DECL (c);
p = tcctx.cb.decl_map->get (decl);
if (p == NULL)
continue;
n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
sf = (tree) n->value;
sf = *tcctx.cb.decl_map->get (sf);
src = build_simple_mem_ref_loc (loc, sarg);
src = omp_build_component_ref (src, sf);
t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
append_to_statement_list (t, &list);
}
/* Second pass: copy shared var pointers and copy construct non-VLA
firstprivate vars. */
for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
splay_tree_key key;
case OMP_CLAUSE_SHARED:
decl = OMP_CLAUSE_DECL (c);
key = (splay_tree_key) decl;
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
key = (splay_tree_key) &DECL_UID (decl);
n = splay_tree_lookup (ctx->field_map, key);
if (n == NULL)
break;
f = (tree) n->value;
if (tcctx.cb.decl_map)
f = *tcctx.cb.decl_map->get (f);
n = splay_tree_lookup (ctx->sfield_map, key);
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *tcctx.cb.decl_map->get (sf);
src = build_simple_mem_ref_loc (loc, sarg);
src = omp_build_component_ref (src, sf);
dst = build_simple_mem_ref_loc (loc, arg);
dst = omp_build_component_ref (dst, f);
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
append_to_statement_list (t, &list);
break;
case OMP_CLAUSE__LOOPTEMP_:
/* Fields for the first two _looptemp_ clauses are initialized by
GOMP_taskloop*; the rest are handled like firstprivate. */
if (looptempno < 2)
{
looptempno++;
break;
}
/* FALLTHRU */
case OMP_CLAUSE_FIRSTPRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (is_variable_sized (decl))
break;
n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
if (n == NULL)
break;
f = (tree) n->value;
if (tcctx.cb.decl_map)
f = *tcctx.cb.decl_map->get (f);
n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
if (n != NULL)
{
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *tcctx.cb.decl_map->get (sf);
src = build_simple_mem_ref_loc (loc, sarg);
src = omp_build_component_ref (src, sf);
if (use_pointer_for_field (decl, NULL) || omp_is_reference (decl))
src = build_simple_mem_ref_loc (loc, src);
}
else
src = decl;
dst = build_simple_mem_ref_loc (loc, arg);
dst = omp_build_component_ref (dst, f);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__LOOPTEMP_)
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
else
t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
append_to_statement_list (t, &list);
break;
case OMP_CLAUSE_PRIVATE:
if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
break;
decl = OMP_CLAUSE_DECL (c);
n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
f = (tree) n->value;
if (tcctx.cb.decl_map)
f = *tcctx.cb.decl_map->get (f);
n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
if (n != NULL)
{
sf = (tree) n->value;
if (tcctx.cb.decl_map)
sf = *tcctx.cb.decl_map->get (sf);
src = build_simple_mem_ref_loc (loc, sarg);
src = omp_build_component_ref (src, sf);
if (use_pointer_for_field (decl, NULL))
src = build_simple_mem_ref_loc (loc, src);
}
else
src = decl;
dst = build_simple_mem_ref_loc (loc, arg);
dst = omp_build_component_ref (dst, f);
t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
append_to_statement_list (t, &list);
break;
default:
break;
}
/* Last pass: handle VLA firstprivates. */
if (tcctx.cb.decl_map)
for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
tree ind, ptr, df;
decl = OMP_CLAUSE_DECL (c);
if (!is_variable_sized (decl))
continue;
n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
if (n == NULL)
continue;
f = (tree) n->value;
f = *tcctx.cb.decl_map->get (f);
gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
ind = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
n = splay_tree_lookup (ctx->sfield_map,
(splay_tree_key) TREE_OPERAND (ind, 0));
sf = (tree) n->value;
sf = *tcctx.cb.decl_map->get (sf);
src = build_simple_mem_ref_loc (loc, sarg);
src = omp_build_component_ref (src, sf);
src = build_simple_mem_ref_loc (loc, src);
dst = build_simple_mem_ref_loc (loc, arg);
dst = omp_build_component_ref (dst, f);
t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
append_to_statement_list (t, &list);
n = splay_tree_lookup (ctx->field_map,
(splay_tree_key) TREE_OPERAND (ind, 0));
df = (tree) n->value;
df = *tcctx.cb.decl_map->get (df);
ptr = build_simple_mem_ref_loc (loc, arg);
ptr = omp_build_component_ref (ptr, df);
t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
build_fold_addr_expr_loc (loc, dst));
append_to_statement_list (t, &list);
}
t = build1 (RETURN_EXPR, void_type_node, NULL);
append_to_statement_list (t, &list);
if (tcctx.cb.decl_map)
delete tcctx.cb.decl_map;
pop_gimplify_context (NULL);
BIND_EXPR_BODY (bind) = list;
pop_cfun ();
}
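/* Sketch of the function generated above; the argument names are
   illustrative only:

       void .omp_task_copyfn (struct .omp_data_t *dst,
			      struct .omp_data_s *src)
       {
	 // pass 1: VLA size/offset temporaries read from *src
	 // pass 2: shared pointers, non-VLA firstprivate copy-ctors
	 // pass 3: VLA firstprivates, then rewire dst pointer fields
       }

   The runtime invokes it before the task body may run asynchronously,
   so all firstprivate copies are taken at task creation time.  */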
static void
lower_depend_clauses (tree *pclauses, gimple_seq *iseq, gimple_seq *oseq)
{
tree c, clauses;
gimple *g;
size_t n_in = 0, n_out = 0, idx = 2, i;
clauses = omp_find_clause (*pclauses, OMP_CLAUSE_DEPEND);
gcc_assert (clauses);
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
switch (OMP_CLAUSE_DEPEND_KIND (c))
{
case OMP_CLAUSE_DEPEND_IN:
n_in++;
break;
case OMP_CLAUSE_DEPEND_OUT:
case OMP_CLAUSE_DEPEND_INOUT:
n_out++;
break;
case OMP_CLAUSE_DEPEND_SOURCE:
case OMP_CLAUSE_DEPEND_SINK:
/* FALLTHRU */
default:
gcc_unreachable ();
}
tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
tree array = create_tmp_var (type);
TREE_ADDRESSABLE (array) = 1;
tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
NULL_TREE);
g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
gimple_seq_add_stmt (iseq, g);
r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
NULL_TREE);
g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
gimple_seq_add_stmt (iseq, g);
for (i = 0; i < 2; i++)
{
if ((i ? n_in : n_out) == 0)
continue;
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
{
tree t = OMP_CLAUSE_DECL (c);
t = fold_convert (ptr_type_node, t);
gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
NULL_TREE, NULL_TREE);
g = gimple_build_assign (r, t);
gimple_seq_add_stmt (iseq, g);
}
}
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
OMP_CLAUSE_CHAIN (c) = *pclauses;
*pclauses = c;
tree clobber = build_constructor (type, NULL);
TREE_THIS_VOLATILE (clobber) = 1;
g = gimple_build_assign (array, clobber);
gimple_seq_add_stmt (oseq, g);
}
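/* Layout of the depend array built above, for n_out out/inout and
   n_in in clauses (a sketch):

       void *arr[2 + n_out + n_in] = {
	 (void *) (n_out + n_in),	// total number of addresses
	 (void *) n_out,		// how many lead out/inout entries
	 &out_0, ..., &in_0, ...	// out/inout first, then in
       };

   The array's address is then prepended as an artificial DEPEND
   clause, and the array is clobbered after the construct.  */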
/* Lower the OpenMP parallel or task directive in the current statement
in GSI_P. CTX holds context information for the directive. */
static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree clauses;
tree child_fn, t;
gimple *stmt = gsi_stmt (*gsi_p);
gbind *par_bind, *bind, *dep_bind = NULL;
gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
location_t loc = gimple_location (stmt);
clauses = gimple_omp_taskreg_clauses (stmt);
par_bind
= as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
par_body = gimple_bind_body (par_bind);
child_fn = ctx->cb.dst_fn;
if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
&& !gimple_omp_parallel_combined_p (stmt))
{
struct walk_stmt_info wi;
int ws_num = 0;
memset (&wi, 0, sizeof (wi));
wi.info = &ws_num;
wi.val_only = true;
walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
if (ws_num == 1)
gimple_omp_parallel_set_combined_p (stmt, true);
}
gimple_seq dep_ilist = NULL;
gimple_seq dep_olist = NULL;
if (gimple_code (stmt) == GIMPLE_OMP_TASK
&& omp_find_clause (clauses, OMP_CLAUSE_DEPEND))
{
push_gimplify_context ();
dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
lower_depend_clauses (gimple_omp_task_clauses_ptr (stmt),
&dep_ilist, &dep_olist);
}
if (ctx->srecord_type)
create_task_copyfn (as_a <gomp_task *> (stmt), ctx);
push_gimplify_context ();
par_olist = NULL;
par_ilist = NULL;
par_rlist = NULL;
bool phony_construct = gimple_code (stmt) == GIMPLE_OMP_PARALLEL
&& gimple_omp_parallel_grid_phony (as_a <gomp_parallel *> (stmt));
if (phony_construct && ctx->record_type)
{
gcc_checking_assert (!ctx->receiver_decl);
ctx->receiver_decl = create_tmp_var
(build_reference_type (ctx->record_type), ".omp_rec");
}
lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
lower_omp (&par_body, ctx);
if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
lower_reduction_clauses (clauses, &par_rlist, ctx);
/* Declare all the variables created by mapping and the variables
declared in the scope of the parallel body. */
record_vars_into (ctx->block_vars, child_fn);
maybe_remove_omp_member_access_dummy_vars (par_bind);
record_vars_into (gimple_bind_vars (par_bind), child_fn);
if (ctx->record_type)
{
ctx->sender_decl
= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
: ctx->record_type, ".omp_data_o");
DECL_NAMELESS (ctx->sender_decl) = 1;
TREE_ADDRESSABLE (ctx->sender_decl) = 1;
gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
}
olist = NULL;
ilist = NULL;
lower_send_clauses (clauses, &ilist, &olist, ctx);
lower_send_shared_vars (&ilist, &olist, ctx);
if (ctx->record_type)
{
tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
TREE_THIS_VOLATILE (clobber) = 1;
gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
clobber));
}
/* Once all the expansions are done, sequence all the different
fragments inside gimple_omp_body. */
new_body = NULL;
if (ctx->record_type)
{
t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
/* fixup_child_record_type might have changed receiver_decl's type. */
t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (ctx->receiver_decl, t));
}
gimple_seq_add_seq (&new_body, par_ilist);
gimple_seq_add_seq (&new_body, par_body);
gimple_seq_add_seq (&new_body, par_rlist);
if (ctx->cancellable)
gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
gimple_seq_add_seq (&new_body, par_olist);
new_body = maybe_catch_exception (new_body);
if (gimple_code (stmt) == GIMPLE_OMP_TASK)
gimple_seq_add_stmt (&new_body,
gimple_build_omp_continue (integer_zero_node,
integer_zero_node));
if (!phony_construct)
{
gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
gimple_omp_set_body (stmt, new_body);
}
bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
gimple_bind_add_seq (bind, ilist);
if (!phony_construct)
gimple_bind_add_stmt (bind, stmt);
else
gimple_bind_add_seq (bind, new_body);
gimple_bind_add_seq (bind, olist);
pop_gimplify_context (NULL);
if (dep_bind)
{
gimple_bind_add_seq (dep_bind, dep_ilist);
gimple_bind_add_stmt (dep_bind, bind);
gimple_bind_add_seq (dep_bind, dep_olist);
pop_gimplify_context (dep_bind);
}
}
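/* Rough shape after this pass for e.g. #pragma omp parallel (a sketch):

       [dep_ilist: depend array setup, tasks only]
       <ilist: .omp_data_o.field = shared_var, send clauses>
       GIMPLE_OMP_PARALLEL [child fn, data arg &.omp_data_o]
	 .omp_data_i = &.omp_data_o;
	 <par_ilist, body, reductions, par_olist>
	 GIMPLE_OMP_RETURN
       <olist: .omp_data_o = CLOBBER>
       [dep_olist]

   Outlining into the child function and the actual GOMP_parallel /
   GOMP_task call are done later, in the expansion pass.  */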
/* Lower the GIMPLE_OMP_TARGET in the current statement
in GSI_P. CTX holds context information for the directive. */
static void
lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree clauses;
tree child_fn, t, c;
gomp_target *stmt = as_a <gomp_target *> (gsi_stmt (*gsi_p));
gbind *tgt_bind, *bind, *dep_bind = NULL;
gimple_seq tgt_body, olist, ilist, fplist, new_body;
location_t loc = gimple_location (stmt);
bool offloaded, data_region;
unsigned int map_cnt = 0;
offloaded = is_gimple_omp_offloaded (stmt);
switch (gimple_omp_target_kind (stmt))
{
case GF_OMP_TARGET_KIND_REGION:
case GF_OMP_TARGET_KIND_UPDATE:
case GF_OMP_TARGET_KIND_ENTER_DATA:
case GF_OMP_TARGET_KIND_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
case GF_OMP_TARGET_KIND_OACC_KERNELS:
case GF_OMP_TARGET_KIND_OACC_UPDATE:
case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_DECLARE:
data_region = false;
break;
case GF_OMP_TARGET_KIND_DATA:
case GF_OMP_TARGET_KIND_OACC_DATA:
case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
data_region = true;
break;
default:
gcc_unreachable ();
}
clauses = gimple_omp_target_clauses (stmt);
gimple_seq dep_ilist = NULL;
gimple_seq dep_olist = NULL;
if (omp_find_clause (clauses, OMP_CLAUSE_DEPEND))
{
push_gimplify_context ();
dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
lower_depend_clauses (gimple_omp_target_clauses_ptr (stmt),
&dep_ilist, &dep_olist);
}
tgt_bind = NULL;
tgt_body = NULL;
if (offloaded)
{
tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
tgt_body = gimple_bind_body (tgt_bind);
}
else if (data_region)
tgt_body = gimple_omp_body (stmt);
child_fn = ctx->cb.dst_fn;
push_gimplify_context ();
fplist = NULL;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var, x;
default:
break;
case OMP_CLAUSE_MAP:
#if CHECKING_P
/* First check what we're prepared to handle in the following. */
switch (OMP_CLAUSE_MAP_KIND (c))
{
case GOMP_MAP_ALLOC:
case GOMP_MAP_TO:
case GOMP_MAP_FROM:
case GOMP_MAP_TOFROM:
case GOMP_MAP_POINTER:
case GOMP_MAP_TO_PSET:
case GOMP_MAP_DELETE:
case GOMP_MAP_RELEASE:
case GOMP_MAP_ALWAYS_TO:
case GOMP_MAP_ALWAYS_FROM:
case GOMP_MAP_ALWAYS_TOFROM:
case GOMP_MAP_FIRSTPRIVATE_POINTER:
case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
case GOMP_MAP_STRUCT:
case GOMP_MAP_ALWAYS_POINTER:
break;
case GOMP_MAP_FORCE_ALLOC:
case GOMP_MAP_FORCE_TO:
case GOMP_MAP_FORCE_FROM:
case GOMP_MAP_FORCE_TOFROM:
case GOMP_MAP_FORCE_PRESENT:
case GOMP_MAP_FORCE_DEVICEPTR:
case GOMP_MAP_DEVICE_RESIDENT:
case GOMP_MAP_LINK:
gcc_assert (is_gimple_omp_oacc (stmt));
break;
default:
gcc_unreachable ();
}
#endif
/* FALLTHRU */
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
oacc_firstprivate:
var = OMP_CLAUSE_DECL (c);
if (!DECL_P (var))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
|| (!OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_POINTER)))
map_cnt++;
continue;
}
if (DECL_SIZE (var)
&& TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
{
tree var2 = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
var2 = TREE_OPERAND (var2, 0);
gcc_assert (DECL_P (var2));
var = var2;
}
if (offloaded
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
{
if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx))
&& varpool_node::get_create (var)->offloadable)
continue;
tree type = build_pointer_type (TREE_TYPE (var));
tree new_var = lookup_decl (var, ctx);
x = create_tmp_var_raw (type, get_name (new_var));
gimple_add_tmp_var (x);
x = build_simple_mem_ref (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
continue;
}
if (!maybe_lookup_field (var, ctx))
continue;
/* Don't remap oacc parallel reduction variables, because the
intermediate result must be local to each gang. */
if (offloaded && !(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_IN_REDUCTION (c)))
{
x = build_receiver_ref (var, true, ctx);
tree new_var = lookup_decl (var, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
x = build_simple_mem_ref (x);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_assert (is_gimple_omp_oacc (ctx->stmt));
if (omp_is_reference (new_var))
{
/* Create a local object to hold the instance
value. */
tree type = TREE_TYPE (TREE_TYPE (new_var));
const char *id = IDENTIFIER_POINTER (DECL_NAME (new_var));
tree inst = create_tmp_var (type, id);
gimplify_assign (inst, fold_indirect_ref (x), &fplist);
x = build_fold_addr_expr (inst);
}
gimplify_assign (new_var, x, &fplist);
}
else if (DECL_P (new_var))
{
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else
gcc_unreachable ();
}
map_cnt++;
break;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_oacc_parallel (ctx))
goto oacc_firstprivate;
map_cnt++;
var = OMP_CLAUSE_DECL (c);
if (!omp_is_reference (var)
&& !is_gimple_reg_type (TREE_TYPE (var)))
{
tree new_var = lookup_decl (var, ctx);
if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
}
else
x = build_receiver_ref (var, true, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
case OMP_CLAUSE_PRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
break;
var = OMP_CLAUSE_DECL (c);
if (is_variable_sized (var))
{
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_IS_DEVICE_PTR:
var = OMP_CLAUSE_DECL (c);
map_cnt++;
if (is_variable_sized (var))
{
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
tree new_var = lookup_decl (var, ctx);
tree type = build_pointer_type (TREE_TYPE (var));
x = create_tmp_var_raw (type, get_name (new_var));
gimple_add_tmp_var (x);
x = build_simple_mem_ref (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else
{
tree new_var = lookup_decl (var, ctx);
x = create_tmp_var_raw (TREE_TYPE (new_var), get_name (new_var));
gimple_add_tmp_var (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
}
if (offloaded)
{
target_nesting_level++;
lower_omp (&tgt_body, ctx);
target_nesting_level--;
}
else if (data_region)
lower_omp (&tgt_body, ctx);
if (offloaded)
{
/* Declare all the variables created by mapping and the variables
declared in the scope of the target body. */
record_vars_into (ctx->block_vars, child_fn);
maybe_remove_omp_member_access_dummy_vars (tgt_bind);
record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
}
olist = NULL;
ilist = NULL;
if (ctx->record_type)
{
ctx->sender_decl
= create_tmp_var (ctx->record_type, ".omp_data_arr");
DECL_NAMELESS (ctx->sender_decl) = 1;
TREE_ADDRESSABLE (ctx->sender_decl) = 1;
t = make_tree_vec (3);
TREE_VEC_ELT (t, 0) = ctx->sender_decl;
TREE_VEC_ELT (t, 1)
= create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
".omp_data_sizes");
DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
tree tkind_type = short_unsigned_type_node;
int talign_shift = 8;
TREE_VEC_ELT (t, 2)
= create_tmp_var (build_array_type_nelts (tkind_type, map_cnt),
".omp_data_kinds");
DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
gimple_omp_target_set_data_arg (stmt, t);
vec<constructor_elt, va_gc> *vsize;
vec<constructor_elt, va_gc> *vkind;
vec_alloc (vsize, map_cnt);
vec_alloc (vkind, map_cnt);
unsigned int map_idx = 0;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree ovar, nc, s, purpose, var, x, type;
unsigned int talign;
default:
break;
case OMP_CLAUSE_MAP:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
oacc_firstprivate_map:
nc = c;
ovar = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
break;
if (!DECL_P (ovar))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
{
gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
== get_base_address (ovar));
nc = OMP_CLAUSE_CHAIN (c);
ovar = OMP_CLAUSE_DECL (nc);
}
else
{
tree x = build_sender_ref (ovar, ctx);
tree v
= build_fold_addr_expr_with_type (ovar, ptr_type_node);
gimplify_assign (x, v, &ilist);
nc = NULL_TREE;
}
}
else
{
if (DECL_SIZE (ovar)
&& TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
{
tree ovar2 = DECL_VALUE_EXPR (ovar);
gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
ovar2 = TREE_OPERAND (ovar2, 0);
gcc_assert (DECL_P (ovar2));
ovar = ovar2;
}
if (!maybe_lookup_field (ovar, ctx))
continue;
}
talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
talign = DECL_ALIGN_UNIT (ovar);
if (nc)
{
var = lookup_decl_in_outer_ctx (ovar, ctx);
x = build_sender_ref (ovar, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
{
gcc_assert (offloaded);
tree avar
= create_tmp_var (TREE_TYPE (TREE_TYPE (x)));
mark_addressable (avar);
gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
talign = DECL_ALIGN_UNIT (avar);
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_assert (is_gimple_omp_oacc (ctx->stmt));
if (!omp_is_reference (var))
{
if (is_gimple_reg (var)
&& OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
var = build_fold_addr_expr (var);
}
else
talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
gimplify_assign (x, var, &ilist);
}
else if (is_gimple_reg (var))
{
gcc_assert (offloaded);
tree avar = create_tmp_var (TREE_TYPE (var));
mark_addressable (avar);
enum gomp_map_kind map_kind = OMP_CLAUSE_MAP_KIND (c);
if (GOMP_MAP_COPY_TO_P (map_kind)
|| map_kind == GOMP_MAP_POINTER
|| map_kind == GOMP_MAP_TO_PSET
|| map_kind == GOMP_MAP_FORCE_DEVICEPTR)
{
/* If we need to initialize a temporary
with VAR because it is not addressable, and
the variable hasn't been initialized yet, then
we'll get a warning for the store to avar.
Don't warn in that case, the mapping might
be implicit. */
TREE_NO_WARNING (var) = 1;
gimplify_assign (avar, var, &ilist);
}
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
if ((GOMP_MAP_COPY_FROM_P (map_kind)
|| map_kind == GOMP_MAP_FORCE_DEVICEPTR)
&& !TYPE_READONLY (TREE_TYPE (var)))
{
x = unshare_expr (x);
x = build_simple_mem_ref (x);
gimplify_assign (var, x, &olist);
}
}
else
{
var = build_fold_addr_expr (var);
gimplify_assign (x, var, &ilist);
}
}
s = NULL_TREE;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
s = TREE_TYPE (ovar);
if (TREE_CODE (s) == REFERENCE_TYPE)
s = TREE_TYPE (s);
s = TYPE_SIZE_UNIT (s);
}
else
s = OMP_CLAUSE_SIZE (c);
if (s == NULL_TREE)
s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
s = fold_convert (size_type_node, s);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
if (TREE_CODE (s) != INTEGER_CST)
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
unsigned HOST_WIDE_INT tkind, tkind_zero;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_MAP:
tkind = OMP_CLAUSE_MAP_KIND (c);
tkind_zero = tkind;
if (OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c))
switch (tkind)
{
case GOMP_MAP_ALLOC:
case GOMP_MAP_TO:
case GOMP_MAP_FROM:
case GOMP_MAP_TOFROM:
case GOMP_MAP_ALWAYS_TO:
case GOMP_MAP_ALWAYS_FROM:
case GOMP_MAP_ALWAYS_TOFROM:
case GOMP_MAP_RELEASE:
case GOMP_MAP_FORCE_TO:
case GOMP_MAP_FORCE_FROM:
case GOMP_MAP_FORCE_TOFROM:
case GOMP_MAP_FORCE_PRESENT:
tkind_zero = GOMP_MAP_ZERO_LEN_ARRAY_SECTION;
break;
case GOMP_MAP_DELETE:
tkind_zero = GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION;
default:
break;
}
if (tkind_zero != tkind)
{
if (integer_zerop (s))
tkind = tkind_zero;
else if (integer_nonzerop (s))
tkind_zero = tkind;
}
break;
case OMP_CLAUSE_FIRSTPRIVATE:
gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
tkind = GOMP_MAP_TO;
tkind_zero = tkind;
break;
case OMP_CLAUSE_TO:
tkind = GOMP_MAP_TO;
tkind_zero = tkind;
break;
case OMP_CLAUSE_FROM:
tkind = GOMP_MAP_FROM;
tkind_zero = tkind;
break;
default:
gcc_unreachable ();
}
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
gcc_checking_assert (tkind_zero
< (HOST_WIDE_INT_C (1U) << talign_shift));
talign = ceil_log2 (talign);
tkind |= talign << talign_shift;
tkind_zero |= talign << talign_shift;
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
gcc_checking_assert (tkind_zero
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
if (tkind == tkind_zero)
x = build_int_cstu (tkind_type, tkind);
else
{
TREE_STATIC (TREE_VEC_ELT (t, 2)) = 0;
x = build3 (COND_EXPR, tkind_type,
fold_build2 (EQ_EXPR, boolean_type_node,
unshare_expr (s), size_zero_node),
build_int_cstu (tkind_type, tkind_zero),
build_int_cstu (tkind_type, tkind));
}
CONSTRUCTOR_APPEND_ELT (vkind, purpose, x);
if (nc && nc != c)
c = nc;
break;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_oacc_parallel (ctx))
goto oacc_firstprivate_map;
ovar = OMP_CLAUSE_DECL (c);
if (omp_is_reference (ovar))
talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
else
talign = DECL_ALIGN_UNIT (ovar);
var = lookup_decl_in_outer_ctx (ovar, ctx);
x = build_sender_ref (ovar, ctx);
tkind = GOMP_MAP_FIRSTPRIVATE;
type = TREE_TYPE (ovar);
if (omp_is_reference (ovar))
type = TREE_TYPE (type);
if ((INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) <= POINTER_SIZE)
|| TREE_CODE (type) == POINTER_TYPE)
{
tkind = GOMP_MAP_FIRSTPRIVATE_INT;
tree t = var;
if (omp_is_reference (var))
t = build_simple_mem_ref (var);
else if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
if (TREE_CODE (type) != POINTER_TYPE)
t = fold_convert (pointer_sized_int_node, t);
t = fold_convert (TREE_TYPE (x), t);
gimplify_assign (x, t, &ilist);
}
else if (omp_is_reference (var))
gimplify_assign (x, var, &ilist);
else if (is_gimple_reg (var))
{
tree avar = create_tmp_var (TREE_TYPE (var));
mark_addressable (avar);
if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
gimplify_assign (avar, var, &ilist);
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
}
else
{
var = build_fold_addr_expr (var);
gimplify_assign (x, var, &ilist);
}
if (tkind == GOMP_MAP_FIRSTPRIVATE_INT)
s = size_int (0);
else if (omp_is_reference (ovar))
s = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
else
s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
s = fold_convert (size_type_node, s);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
if (TREE_CODE (s) != INTEGER_CST)
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
talign = ceil_log2 (talign);
tkind |= talign << talign_shift;
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
CONSTRUCTOR_APPEND_ELT (vkind, purpose,
build_int_cstu (tkind_type, tkind));
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_IS_DEVICE_PTR:
ovar = OMP_CLAUSE_DECL (c);
var = lookup_decl_in_outer_ctx (ovar, ctx);
x = build_sender_ref (ovar, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR)
tkind = GOMP_MAP_USE_DEVICE_PTR;
else
tkind = GOMP_MAP_FIRSTPRIVATE_INT;
type = TREE_TYPE (ovar);
if (TREE_CODE (type) == ARRAY_TYPE)
var = build_fold_addr_expr (var);
else
{
if (omp_is_reference (ovar))
{
type = TREE_TYPE (type);
if (TREE_CODE (type) != ARRAY_TYPE)
var = build_simple_mem_ref (var);
var = fold_convert (TREE_TYPE (x), var);
}
}
gimplify_assign (x, var, &ilist);
s = size_int (0);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
CONSTRUCTOR_APPEND_ELT (vkind, purpose,
build_int_cstu (tkind_type, tkind));
break;
}
gcc_assert (map_idx == map_cnt);
DECL_INITIAL (TREE_VEC_ELT (t, 1))
= build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
DECL_INITIAL (TREE_VEC_ELT (t, 2))
= build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
for (int i = 1; i <= 2; i++)
if (!TREE_STATIC (TREE_VEC_ELT (t, i)))
{
gimple_seq initlist = NULL;
force_gimple_operand (build1 (DECL_EXPR, void_type_node,
TREE_VEC_ELT (t, i)),
&initlist, true, NULL_TREE);
gimple_seq_add_seq (&ilist, initlist);
tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, i)),
NULL);
TREE_THIS_VOLATILE (clobber) = 1;
gimple_seq_add_stmt (&olist,
gimple_build_assign (TREE_VEC_ELT (t, i),
clobber));
}
tree clobber = build_constructor (ctx->record_type, NULL);
TREE_THIS_VOLATILE (clobber) = 1;
gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
clobber));
}
/* Once all the expansions are done, sequence all the different
fragments inside gimple_omp_body. */
new_body = NULL;
if (offloaded
&& ctx->record_type)
{
t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
/* fixup_child_record_type might have changed receiver_decl's type. */
t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (ctx->receiver_decl, t));
}
gimple_seq_add_seq (&new_body, fplist);
if (offloaded || data_region)
{
tree prev = NULL_TREE;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var, x;
default:
break;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
break;
var = OMP_CLAUSE_DECL (c);
if (omp_is_reference (var)
|| is_gimple_reg_type (TREE_TYPE (var)))
{
tree new_var = lookup_decl (var, ctx);
tree type;
type = TREE_TYPE (var);
if (omp_is_reference (var))
type = TREE_TYPE (type);
if ((INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) <= POINTER_SIZE)
|| TREE_CODE (type) == POINTER_TYPE)
{
x = build_receiver_ref (var, false, ctx);
if (TREE_CODE (type) != POINTER_TYPE)
x = fold_convert (pointer_sized_int_node, x);
x = fold_convert (type, x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
if (omp_is_reference (var))
{
tree v = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (v);
TREE_ADDRESSABLE (v) = 1;
gimple_seq_add_stmt (&new_body,
gimple_build_assign (v, x));
x = build_fold_addr_expr (v);
}
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
else
{
x = build_receiver_ref (var, !omp_is_reference (var), ctx);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
}
else if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_var = lookup_decl (pvar, ctx);
x = build_receiver_ref (var, false, ctx);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
case OMP_CLAUSE_PRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
break;
var = OMP_CLAUSE_DECL (c);
if (omp_is_reference (var))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (TREE_CONSTANT (x))
{
x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
get_name (var));
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
}
else
break;
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_IS_DEVICE_PTR:
var = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR)
x = build_sender_ref (var, ctx);
else
x = build_receiver_ref (var, false, ctx);
if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_var = lookup_decl (pvar, ctx);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
tree new_var = lookup_decl (var, ctx);
new_var = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_var = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_var));
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
else
{
tree type = TREE_TYPE (var);
tree new_var = lookup_decl (var, ctx);
if (omp_is_reference (var))
{
type = TREE_TYPE (type);
if (TREE_CODE (type) != ARRAY_TYPE)
{
tree v = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (v);
TREE_ADDRESSABLE (v) = 1;
x = fold_convert (type, x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (v, x));
x = build_fold_addr_expr (v);
}
}
new_var = DECL_VALUE_EXPR (new_var);
x = fold_convert (TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
}
/* Handle GOMP_MAP_FIRSTPRIVATE_{POINTER,REFERENCE} in a second pass,
so that firstprivate vars holding OMP_CLAUSE_SIZE, if needed,
are already handled.  Similarly OMP_CLAUSE_PRIVATE for VLAs
or references to VLAs. */
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var;
default:
break;
case OMP_CLAUSE_MAP:
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
poly_int64 offset = 0;
gcc_assert (prev);
var = OMP_CLAUSE_DECL (c);
if (DECL_P (var)
&& TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
&& is_global_var (maybe_lookup_decl_in_outer_ctx (var,
ctx))
&& varpool_node::get_create (var)->offloadable)
break;
if (TREE_CODE (var) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (var, 0)) == COMPONENT_REF)
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == COMPONENT_REF)
{
var = get_addr_base_and_unit_offset (var, &offset);
gcc_assert (var != NULL_TREE && DECL_P (var));
}
else if (DECL_SIZE (var)
&& TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
{
tree var2 = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
var2 = TREE_OPERAND (var2, 0);
gcc_assert (DECL_P (var2));
var = var2;
}
tree new_var = lookup_decl (var, ctx), x;
tree type = TREE_TYPE (new_var);
bool is_ref;
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == INDIRECT_REF
&& (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0))
== COMPONENT_REF))
{
type = TREE_TYPE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0));
is_ref = true;
new_var = build2 (MEM_REF, type,
build_fold_addr_expr (new_var),
build_int_cst (build_pointer_type (type),
offset));
}
else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF)
{
type = TREE_TYPE (OMP_CLAUSE_DECL (c));
is_ref = TREE_CODE (type) == REFERENCE_TYPE;
new_var = build2 (MEM_REF, type,
build_fold_addr_expr (new_var),
build_int_cst (build_pointer_type (type),
offset));
}
else
is_ref = omp_is_reference (var);
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
is_ref = false;
bool ref_to_array = false;
if (is_ref)
{
type = TREE_TYPE (type);
if (TREE_CODE (type) == ARRAY_TYPE)
{
type = build_pointer_type (type);
ref_to_array = true;
}
}
else if (TREE_CODE (type) == ARRAY_TYPE)
{
tree decl2 = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (decl2) == MEM_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
new_var = decl2;
type = TREE_TYPE (new_var);
}
x = build_receiver_ref (OMP_CLAUSE_DECL (prev), false, ctx);
x = fold_convert_loc (clause_loc, type, x);
if (!integer_zerop (OMP_CLAUSE_SIZE (c)))
{
tree bias = OMP_CLAUSE_SIZE (c);
if (DECL_P (bias))
bias = lookup_decl (bias, ctx);
bias = fold_convert_loc (clause_loc, sizetype, bias);
bias = fold_build1_loc (clause_loc, NEGATE_EXPR, sizetype,
bias);
x = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
TREE_TYPE (x), x, bias);
}
if (ref_to_array)
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
if (is_ref && !ref_to_array)
{
tree t = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (t);
TREE_ADDRESSABLE (t) = 1;
gimple_seq_add_stmt (&new_body,
gimple_build_assign (t, x));
x = build_fold_addr_expr_loc (clause_loc, t);
}
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
prev = NULL_TREE;
}
else if (OMP_CLAUSE_CHAIN (c)
&& OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (c))
== OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
prev = c;
break;
case OMP_CLAUSE_PRIVATE:
var = OMP_CLAUSE_DECL (c);
if (is_variable_sized (var))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree al = size_int (DECL_ALIGN (var));
tree x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
x = fold_convert_loc (clause_loc, TREE_TYPE (new_pvar), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_pvar, x));
}
else if (omp_is_reference (var) && !is_gimple_omp_oacc (ctx->stmt))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
tree x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (TREE_CONSTANT (x))
break;
else
{
tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree rtype = TREE_TYPE (TREE_TYPE (new_var));
tree al = size_int (TYPE_ALIGN (rtype));
x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
}
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
}
gimple_seq fork_seq = NULL;
gimple_seq join_seq = NULL;
if (is_oacc_parallel (ctx))
{
/* If there are reductions on the offloaded region itself, treat
them as a dummy GANG loop. */
tree level = build_int_cst (integer_type_node, GOMP_DIM_GANG);
lower_oacc_reductions (gimple_location (ctx->stmt), clauses, level,
false, NULL, NULL, &fork_seq, &join_seq, ctx);
}
gimple_seq_add_seq (&new_body, fork_seq);
gimple_seq_add_seq (&new_body, tgt_body);
gimple_seq_add_seq (&new_body, join_seq);
if (offloaded)
new_body = maybe_catch_exception (new_body);
gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
gimple_omp_set_body (stmt, new_body);
}
bind = gimple_build_bind (NULL, NULL,
tgt_bind ? gimple_bind_block (tgt_bind)
: NULL_TREE);
gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
gimple_bind_add_seq (bind, ilist);
gimple_bind_add_stmt (bind, stmt);
gimple_bind_add_seq (bind, olist);
pop_gimplify_context (NULL);
if (dep_bind)
{
gimple_bind_add_seq (dep_bind, dep_ilist);
gimple_bind_add_stmt (dep_bind, bind);
gimple_bind_add_seq (dep_bind, dep_olist);
pop_gimplify_context (dep_bind);
}
}
/* Expand code for an OpenMP teams directive. */
static void
lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
gomp_teams *teams_stmt = as_a <gomp_teams *> (gsi_stmt (*gsi_p));
push_gimplify_context ();
tree block = make_node (BLOCK);
gbind *bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_seq bind_body = NULL;
gimple_seq dlist = NULL;
gimple_seq olist = NULL;
tree num_teams = omp_find_clause (gimple_omp_teams_clauses (teams_stmt),
OMP_CLAUSE_NUM_TEAMS);
if (num_teams == NULL_TREE)
num_teams = build_int_cst (unsigned_type_node, 0);
else
{
num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
num_teams = fold_convert (unsigned_type_node, num_teams);
gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
}
tree thread_limit = omp_find_clause (gimple_omp_teams_clauses (teams_stmt),
OMP_CLAUSE_THREAD_LIMIT);
if (thread_limit == NULL_TREE)
thread_limit = build_int_cst (unsigned_type_node, 0);
else
{
thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
thread_limit = fold_convert (unsigned_type_node, thread_limit);
gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
fb_rvalue);
}
lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
&bind_body, &dlist, ctx, NULL);
lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
if (!gimple_omp_teams_grid_phony (teams_stmt))
{
gimple_seq_add_stmt (&bind_body, teams_stmt);
location_t loc = gimple_location (teams_stmt);
tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
gimple *call = gimple_build_call (decl, 2, num_teams, thread_limit);
gimple_set_location (call, loc);
gimple_seq_add_stmt (&bind_body, call);
}
gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
gimple_omp_set_body (teams_stmt, NULL);
gimple_seq_add_seq (&bind_body, olist);
gimple_seq_add_seq (&bind_body, dlist);
if (!gimple_omp_teams_grid_phony (teams_stmt))
gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
gimple_bind_set_body (bind, bind_body);
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
if (BLOCK_VARS (block))
TREE_USED (block) = 1;
}
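/* For example, a hypothetical

       #pragma omp teams num_teams(4) thread_limit(8)

   is lowered above so that a call to the GOMP_teams runtime entry point
   (BUILT_IN_GOMP_TEAMS) with arguments (4, 8) is emitted right after the
   teams statement and before its body; 0 is passed for an absent clause.  */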
/* Expand code within an artificial GIMPLE_OMP_GRID_BODY OMP construct. */
static void
lower_omp_grid_body (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
gimple *stmt = gsi_stmt (*gsi_p);
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_seq_add_stmt (gimple_omp_body_ptr (stmt),
gimple_build_omp_return (false));
}
/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
regimplified. If DATA is non-NULL, lower_omp_1 is outside
of OMP context, but with task_shared_vars set. */
static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
void *data)
{
tree t = *tp;
/* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
if (VAR_P (t) && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
return t;
if (task_shared_vars
&& DECL_P (t)
&& bitmap_bit_p (task_shared_vars, DECL_UID (t)))
return t;
/* If a global variable has been privatized, TREE_CONSTANT on
ADDR_EXPR might be wrong. */
if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
recompute_tree_invariant_for_addr_expr (t);
*walk_subtrees = !IS_TYPE_OR_DECL_P (t);
return NULL_TREE;
}
/* Data to be communicated between lower_omp_regimplify_operands and
lower_omp_regimplify_operands_p. */
struct lower_omp_regimplify_operands_data
{
omp_context *ctx;
vec<tree> *decls;
};
/* Helper function for lower_omp_regimplify_operands. Find
omp_member_access_dummy_var vars and adjust temporarily their
DECL_VALUE_EXPRs if needed. */
static tree
lower_omp_regimplify_operands_p (tree *tp, int *walk_subtrees,
void *data)
{
tree t = omp_member_access_dummy_var (*tp);
if (t)
{
struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
lower_omp_regimplify_operands_data *ldata
= (lower_omp_regimplify_operands_data *) wi->info;
tree o = maybe_lookup_decl (t, ldata->ctx);
if (o != t)
{
ldata->decls->safe_push (DECL_VALUE_EXPR (*tp));
ldata->decls->safe_push (*tp);
tree v = unshare_and_remap (DECL_VALUE_EXPR (*tp), t, o);
SET_DECL_VALUE_EXPR (*tp, v);
}
}
*walk_subtrees = !IS_TYPE_OR_DECL_P (*tp);
return NULL_TREE;
}
/* Wrapper around gimple_regimplify_operands that adjusts DECL_VALUE_EXPRs
of omp_member_access_dummy_var vars during regimplification. */
static void
lower_omp_regimplify_operands (omp_context *ctx, gimple *stmt,
gimple_stmt_iterator *gsi_p)
{
auto_vec<tree, 10> decls;
if (ctx)
{
struct walk_stmt_info wi;
memset (&wi, '\0', sizeof (wi));
struct lower_omp_regimplify_operands_data data;
data.ctx = ctx;
data.decls = &decls;
wi.info = &data;
walk_gimple_op (stmt, lower_omp_regimplify_operands_p, &wi);
}
gimple_regimplify_operands (stmt, gsi_p);
while (!decls.is_empty ())
{
tree t = decls.pop ();
tree v = decls.pop ();
SET_DECL_VALUE_EXPR (t, v);
}
}
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
gimple *stmt = gsi_stmt (*gsi_p);
struct walk_stmt_info wi;
gcall *call_stmt;
if (gimple_has_location (stmt))
input_location = gimple_location (stmt);
if (task_shared_vars)
memset (&wi, '\0', sizeof (wi));
/* If we have issued syntax errors, avoid doing any heavy lifting.
Just replace the OMP directives with a NOP to avoid
confusing RTL expansion. */
if (seen_error () && is_gimple_omp (stmt))
{
gsi_replace (gsi_p, gimple_build_nop (), true);
return;
}
switch (gimple_code (stmt))
{
case GIMPLE_COND:
{
gcond *cond_stmt = as_a <gcond *> (stmt);
if ((ctx || task_shared_vars)
&& (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
lower_omp_regimplify_p,
ctx ? NULL : &wi, NULL)
|| walk_tree (gimple_cond_rhs_ptr (cond_stmt),
lower_omp_regimplify_p,
ctx ? NULL : &wi, NULL)))
lower_omp_regimplify_operands (ctx, cond_stmt, gsi_p);
}
break;
case GIMPLE_CATCH:
lower_omp (gimple_catch_handler_ptr (as_a <gcatch *> (stmt)), ctx);
break;
case GIMPLE_EH_FILTER:
lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
break;
case GIMPLE_TRY:
lower_omp (gimple_try_eval_ptr (stmt), ctx);
lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
break;
case GIMPLE_TRANSACTION:
lower_omp (gimple_transaction_body_ptr (as_a <gtransaction *> (stmt)),
ctx);
break;
case GIMPLE_BIND:
lower_omp (gimple_bind_body_ptr (as_a <gbind *> (stmt)), ctx);
maybe_remove_omp_member_access_dummy_vars (as_a <gbind *> (stmt));
break;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
if (ctx->cancellable)
ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_taskreg (gsi_p, ctx);
break;
case GIMPLE_OMP_FOR:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
if (ctx->cancellable)
ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_for (gsi_p, ctx);
break;
case GIMPLE_OMP_SECTIONS:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
if (ctx->cancellable)
ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_sections (gsi_p, ctx);
break;
case GIMPLE_OMP_SINGLE:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_single (gsi_p, ctx);
break;
case GIMPLE_OMP_MASTER:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_master (gsi_p, ctx);
break;
case GIMPLE_OMP_TASKGROUP:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_taskgroup (gsi_p, ctx);
break;
case GIMPLE_OMP_ORDERED:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_ordered (gsi_p, ctx);
break;
case GIMPLE_OMP_CRITICAL:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_critical (gsi_p, ctx);
break;
case GIMPLE_OMP_ATOMIC_LOAD:
if ((ctx || task_shared_vars)
&& walk_tree (gimple_omp_atomic_load_rhs_ptr (
as_a <gomp_atomic_load *> (stmt)),
lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
lower_omp_regimplify_operands (ctx, stmt, gsi_p);
break;
case GIMPLE_OMP_TARGET:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_target (gsi_p, ctx);
break;
case GIMPLE_OMP_TEAMS:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_teams (gsi_p, ctx);
break;
case GIMPLE_OMP_GRID_BODY:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
lower_omp_grid_body (gsi_p, ctx);
break;
case GIMPLE_CALL:
tree fndecl;
call_stmt = as_a <gcall *> (stmt);
fndecl = gimple_call_fndecl (call_stmt);
if (fndecl
&& DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_GOMP_BARRIER:
if (ctx == NULL)
break;
/* FALLTHRU */
case BUILT_IN_GOMP_CANCEL:
case BUILT_IN_GOMP_CANCELLATION_POINT:
omp_context *cctx;
cctx = ctx;
if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
cctx = cctx->outer;
gcc_assert (gimple_call_lhs (call_stmt) == NULL_TREE);
if (!cctx->cancellable)
{
if (DECL_FUNCTION_CODE (fndecl)
== BUILT_IN_GOMP_CANCELLATION_POINT)
{
stmt = gimple_build_nop ();
gsi_replace (gsi_p, stmt, false);
}
break;
}
if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
{
fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
gimple_call_set_fndecl (call_stmt, fndecl);
gimple_call_set_fntype (call_stmt, TREE_TYPE (fndecl));
}
tree lhs;
lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)));
gimple_call_set_lhs (call_stmt, lhs);
tree fallthru_label;
fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
gimple *g;
g = gimple_build_label (fallthru_label);
gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
g = gimple_build_cond (NE_EXPR, lhs,
fold_convert (TREE_TYPE (lhs),
boolean_false_node),
cctx->cancel_label, fallthru_label);
gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
break;
default:
break;
}
/* FALLTHRU */
default:
if ((ctx || task_shared_vars)
&& walk_gimple_op (stmt, lower_omp_regimplify_p,
ctx ? NULL : &wi))
{
/* Just remove clobbers; this should happen only if we have
"privatized" local addressable variables in SIMD regions.
The clobber isn't needed in that case, and gimplifying the address
of the ARRAY_REF into a pointer and creating a MEM_REF based
clobber would create worse code than we get with the clobber
dropped.  */
if (gimple_clobber_p (stmt))
{
gsi_replace (gsi_p, gimple_build_nop (), true);
break;
}
lower_omp_regimplify_operands (ctx, stmt, gsi_p);
}
break;
}
}
static void
lower_omp (gimple_seq *body, omp_context *ctx)
{
location_t saved_location = input_location;
gimple_stmt_iterator gsi;
for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
lower_omp_1 (&gsi, ctx);
/* During gimplification, we haven't folded statements inside offloading
or taskreg regions (gimplify.c:maybe_fold_stmt); do that now. */
if (target_nesting_level || taskreg_nesting_level)
for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
fold_stmt (&gsi);
input_location = saved_location;
}
/* Main entry point. */
static unsigned int
execute_lower_omp (void)
{
gimple_seq body;
int i;
omp_context *ctx;
/* This pass always runs, to provide PROP_gimple_lomp.
But often, there is nothing to do. */
if (flag_openacc == 0 && flag_openmp == 0
&& flag_openmp_simd == 0)
return 0;
all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
delete_omp_context);
body = gimple_body (current_function_decl);
if (hsa_gen_requested_p ())
omp_grid_gridify_all_targets (&body);
scan_omp (&body, NULL);
gcc_assert (taskreg_nesting_level == 0);
FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
finish_taskreg_scan (ctx);
taskreg_contexts.release ();
if (all_contexts->root)
{
if (task_shared_vars)
push_gimplify_context ();
lower_omp (&body, NULL);
if (task_shared_vars)
pop_gimplify_context (NULL);
}
if (all_contexts)
{
splay_tree_delete (all_contexts);
all_contexts = NULL;
}
BITMAP_FREE (task_shared_vars);
/* If the current function is a method, remove the artificial dummy VAR_DECLs
created for non-static data member privatization: they aren't needed for
debug info or anything else, have already been replaced everywhere in the
IL, and cause problems with LTO.  */
if (DECL_ARGUMENTS (current_function_decl)
&& DECL_ARTIFICIAL (DECL_ARGUMENTS (current_function_decl))
&& (TREE_CODE (TREE_TYPE (DECL_ARGUMENTS (current_function_decl)))
== POINTER_TYPE))
remove_member_access_dummy_vars (DECL_INITIAL (current_function_decl));
return 0;
}
namespace {
const pass_data pass_data_lower_omp =
{
GIMPLE_PASS, /* type */
"omplower", /* name */
OPTGROUP_OMP, /* optinfo_flags */
TV_NONE, /* tv_id */
PROP_gimple_any, /* properties_required */
PROP_gimple_lomp | PROP_gimple_lomp_dev, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_lower_omp : public gimple_opt_pass
{
public:
pass_lower_omp (gcc::context *ctxt)
: gimple_opt_pass (pass_data_lower_omp, ctxt)
{}
/* opt_pass methods: */
virtual unsigned int execute (function *) { return execute_lower_omp (); }
}; // class pass_lower_omp
} // anon namespace
gimple_opt_pass *
make_pass_lower_omp (gcc::context *ctxt)
{
return new pass_lower_omp (ctxt);
}
/* The following is a utility to diagnose structured block violations.
It is not part of the "omplower" pass, as that's invoked too late. It
should be invoked by the respective front ends after gimplification. */
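/* For example, a hypothetical user snippet such as

       goto l;                    branch from outside ...
     #pragma omp parallel
       {
       l:;                        ... to a label inside the construct
       }

   is diagnosed by diagnose_sb_0 below with
   "invalid entry to OpenMP structured block".  */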
static splay_tree all_labels;
/* Check for mismatched contexts and generate an error if needed. Return
true if an error is detected. */
static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
gimple *branch_ctx, gimple *label_ctx)
{
gcc_checking_assert (!branch_ctx || is_gimple_omp (branch_ctx));
gcc_checking_assert (!label_ctx || is_gimple_omp (label_ctx));
if (label_ctx == branch_ctx)
return false;
const char* kind = NULL;
if (flag_openacc)
{
if ((branch_ctx && is_gimple_omp_oacc (branch_ctx))
|| (label_ctx && is_gimple_omp_oacc (label_ctx)))
{
gcc_checking_assert (kind == NULL);
kind = "OpenACC";
}
}
if (kind == NULL)
{
gcc_checking_assert (flag_openmp || flag_openmp_simd);
kind = "OpenMP";
}
/* Previously we kept track of the label's entire context in diagnose_sb_[12]
so we could traverse it and issue a correct "exit" or "enter" error
message upon a structured block violation.
We built the context by building a list with tree_cons'ing, but there is
no easy counterpart in gimple tuples. It seems like far too much work
for issuing exit/enter error messages. If someone really misses the
distinct error message... patches welcome. */
#if 0
/* Try to avoid confusing the user by producing an error message
with correct "exit" or "enter" verbiage. We prefer "exit"
unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
if (branch_ctx == NULL)
exit_p = false;
else
{
while (label_ctx)
{
if (TREE_VALUE (label_ctx) == branch_ctx)
{
exit_p = false;
break;
}
label_ctx = TREE_CHAIN (label_ctx);
}
}
if (exit_p)
error ("invalid exit from %s structured block", kind);
else
error ("invalid entry to %s structured block", kind);
#endif
/* If it's obvious we have an invalid entry, be specific about the error. */
if (branch_ctx == NULL)
error ("invalid entry to %s structured block", kind);
else
{
/* Otherwise, be vague and lazy, but efficient. */
error ("invalid branch to/from %s structured block", kind);
}
gsi_replace (gsi_p, gimple_build_nop (), false);
return true;
}
/* Pass 1: Create a minimal tree of structured blocks, and record
where each label is found. */
static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple *context = (gimple *) wi->info;
gimple *inner_context;
gimple *stmt = gsi_stmt (*gsi_p);
*handled_ops_p = true;
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_TARGET:
case GIMPLE_OMP_TEAMS:
case GIMPLE_OMP_TASKGROUP:
/* The minimal context here is just the current OMP construct. */
inner_context = stmt;
wi->info = inner_context;
walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
wi->info = context;
break;
case GIMPLE_OMP_FOR:
inner_context = stmt;
wi->info = inner_context;
/* gimple_omp_for_{index,initial,final} are all DECLs; no need to
walk them. */
walk_gimple_seq (gimple_omp_for_pre_body (stmt),
diagnose_sb_1, NULL, wi);
walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
wi->info = context;
break;
case GIMPLE_LABEL:
splay_tree_insert (all_labels,
(splay_tree_key) gimple_label_label (
as_a <glabel *> (stmt)),
(splay_tree_value) context);
break;
default:
break;
}
return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
the destination label's context. */
static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple *context = (gimple *) wi->info;
splay_tree_node n;
gimple *stmt = gsi_stmt (*gsi_p);
*handled_ops_p = true;
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_OMP_TARGET:
case GIMPLE_OMP_TEAMS:
case GIMPLE_OMP_TASKGROUP:
wi->info = stmt;
walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
wi->info = context;
break;
case GIMPLE_OMP_FOR:
wi->info = stmt;
/* gimple_omp_for_{index,initial,final} are all DECLs; no need to
walk them. */
walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
diagnose_sb_2, NULL, wi);
walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
wi->info = context;
break;
case GIMPLE_COND:
{
gcond *cond_stmt = as_a <gcond *> (stmt);
tree lab = gimple_cond_true_label (cond_stmt);
if (lab)
{
n = splay_tree_lookup (all_labels,
(splay_tree_key) lab);
diagnose_sb_0 (gsi_p, context,
n ? (gimple *) n->value : NULL);
}
lab = gimple_cond_false_label (cond_stmt);
if (lab)
{
n = splay_tree_lookup (all_labels,
(splay_tree_key) lab);
diagnose_sb_0 (gsi_p, context,
n ? (gimple *) n->value : NULL);
}
}
break;
case GIMPLE_GOTO:
{
tree lab = gimple_goto_dest (stmt);
if (TREE_CODE (lab) != LABEL_DECL)
break;
n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
diagnose_sb_0 (gsi_p, context, n ? (gimple *) n->value : NULL);
}
break;
case GIMPLE_SWITCH:
{
gswitch *switch_stmt = as_a <gswitch *> (stmt);
unsigned int i;
for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
{
tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
if (n && diagnose_sb_0 (gsi_p, context, (gimple *) n->value))
break;
}
}
break;
case GIMPLE_RETURN:
diagnose_sb_0 (gsi_p, context, NULL);
break;
default:
break;
}
return NULL_TREE;
}
static unsigned int
diagnose_omp_structured_block_errors (void)
{
struct walk_stmt_info wi;
gimple_seq body = gimple_body (current_function_decl);
all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
memset (&wi, 0, sizeof (wi));
walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
memset (&wi, 0, sizeof (wi));
wi.want_locations = true;
walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
gimple_set_body (current_function_decl, body);
splay_tree_delete (all_labels);
all_labels = NULL;
return 0;
}
namespace {
const pass_data pass_data_diagnose_omp_blocks =
{
GIMPLE_PASS, /* type */
"*diagnose_omp_blocks", /* name */
OPTGROUP_OMP, /* optinfo_flags */
TV_NONE, /* tv_id */
PROP_gimple_any, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_diagnose_omp_blocks : public gimple_opt_pass
{
public:
pass_diagnose_omp_blocks (gcc::context *ctxt)
: gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *)
{
return flag_openacc || flag_openmp || flag_openmp_simd;
}
virtual unsigned int execute (function *)
{
return diagnose_omp_structured_block_errors ();
}
}; // class pass_diagnose_omp_blocks
} // anon namespace
gimple_opt_pass *
make_pass_diagnose_omp_blocks (gcc::context *ctxt)
{
return new pass_diagnose_omp_blocks (ctxt);
}
#include "gt-omp-low.h"
|
GB_unaryop__identity_int16_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int16_int64
// op(A') function: GB_tran__identity_int16_int64
// C type: int16_t
// A type: int64_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
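// As a worked example, GB_CAST_OP (pC, pA) expands for this operator to:
//
//      { int64_t aij = Ax [pA] ;        // GB_GETA
//        int16_t x = (int16_t) aij ;    // GB_CASTING
//        Cx [pC] = x ; }                // GB_OP applied to GB_CX (pC)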
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_int16_int64
(
int16_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_int16_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_emult_04.c | //------------------------------------------------------------------------------
// GB_emult_04: C<M>= A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C<M>= A.*B, M sparse/hyper, A and B bitmap/full. C has the same sparsity
// structure as M, and its pattern is a subset of M.
// ------------------------------------------
// C <M>= A .* B
// ------------------------------------------
// sparse sparse bitmap bitmap (method: 04)
// sparse sparse bitmap full (method: 04)
// sparse sparse full bitmap (method: 04)
// sparse sparse full full (method: 04)
// TODO: this function can also do eWiseAdd, just as easily.
// Just change the "&&" to "||" in the GB_emult_04_template.
// If A and B are both full, eadd and emult are identical.
#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif
#define GB_FREE_WORK \
{ \
GB_WERK_POP (Work, int64_t) ; \
GB_WERK_POP (M_ek_slicing, int64_t) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_phbix_free (C) ; \
}
GrB_Info GB_emult_04 // C<M>=A.*B, M sparse/hyper, A and B bitmap/full
(
GrB_Matrix C, // output matrix, static header
const GrB_Type ctype, // type of output matrix C
const bool C_is_csc, // format of output matrix C
const GrB_Matrix M, // sparse/hyper, not NULL
const bool Mask_struct, // if true, use only the structure of M
bool *mask_applied, // if true, the mask was applied
const GrB_Matrix A, // input A matrix (bitmap/full)
const GrB_Matrix B, // input B matrix (bitmap/full)
const GrB_BinaryOp op, // op to perform C = op (A,B)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (C != NULL && C->static_header) ;
ASSERT_MATRIX_OK (M, "M for emult_04", GB0) ;
ASSERT_MATRIX_OK (A, "A for emult_04", GB0) ;
ASSERT_MATRIX_OK (B, "B for emult_04", GB0) ;
ASSERT_BINARYOP_OK (op, "op for emult_04", GB0) ;
ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
ASSERT (!GB_PENDING (M)) ;
ASSERT (GB_JUMBLED_OK (M)) ;
ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (GB_IS_BITMAP (A) || GB_IS_FULL (A) || GB_as_if_full (A)) ;
ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B) || GB_as_if_full (B)) ;
int C_sparsity = GB_sparsity (M) ;
GBURBLE ("emult_04:(%s<%s>=%s.*%s) ",
GB_sparsity_char (C_sparsity),
GB_sparsity_char_matrix (M),
GB_sparsity_char_matrix (A),
GB_sparsity_char_matrix (B)) ;
//--------------------------------------------------------------------------
// declare workspace
//--------------------------------------------------------------------------
GB_WERK_DECLARE (Work, int64_t) ;
int64_t *restrict Wfirst = NULL ;
int64_t *restrict Wlast = NULL ;
int64_t *restrict Cp_kfirst = NULL ;
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
//--------------------------------------------------------------------------
// get M, A, and B
//--------------------------------------------------------------------------
const int64_t *restrict Mp = M->p ;
const int64_t *restrict Mh = M->h ;
const int64_t *restrict Mi = M->i ;
const GB_void *restrict Mx = (Mask_struct) ? NULL : (GB_void *) M->x ;
const int64_t vlen = M->vlen ;
const int64_t vdim = M->vdim ;
const int64_t nvec = M->nvec ;
const int64_t mnz = GB_nnz (M) ;
const size_t msize = M->type->size ;
const int8_t *restrict Ab = A->b ;
const int8_t *restrict Bb = B->b ;
//--------------------------------------------------------------------------
// check if C is iso and compute its iso value if it is
//--------------------------------------------------------------------------
const size_t csize = ctype->size ;
GB_void cscalar [GB_VLA(csize)] ;
bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ;
//--------------------------------------------------------------------------
// allocate C->p and C->h
//--------------------------------------------------------------------------
GB_OK (GB_new (&C, true, // sparse or hyper (same as M), static header
ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
C_sparsity, M->hyper_switch, nvec, Context)) ;
int64_t *restrict Cp = C->p ;
//--------------------------------------------------------------------------
// slice the mask matrix M
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int M_ntasks, M_nthreads ;
GB_SLICE_MATRIX (M, 8, chunk) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_WERK_PUSH (Work, 3*M_ntasks, int64_t) ;
if (Work == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
Wfirst = Work ;
Wlast = Work + M_ntasks ;
Cp_kfirst = Work + M_ntasks * 2 ;
//--------------------------------------------------------------------------
// count entries in C
//--------------------------------------------------------------------------
// This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).
// TODO: if M is structural and A and B are both full, then C has exactly
// the same pattern as M and the first phase can be skipped.
int tid ;
#pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < M_ntasks ; tid++)
{
int64_t kfirst = kfirst_Mslice [tid] ;
int64_t klast = klast_Mslice [tid] ;
Wfirst [tid] = 0 ;
Wlast [tid] = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// count the entries in C(:,j)
int64_t j = GBH (Mh, k) ;
int64_t pstart = j * vlen ; // start of A(:,j) and B(:,j)
int64_t pM, pM_end ;
GB_get_pA (&pM, &pM_end, tid, k,
kfirst, klast, pstart_Mslice, Mp, vlen) ;
int64_t cjnz = 0 ;
for ( ; pM < pM_end ; pM++)
{
bool mij = GB_mcast (Mx, pM, msize) ;
if (mij)
{
int64_t i = Mi [pM] ;
cjnz +=
(GBB (Ab, pstart + i)
&& // TODO: for GB_add, use || instead
GBB (Bb, pstart + i)) ;
}
}
if (k == kfirst)
{
Wfirst [tid] = cjnz ;
}
else if (k == klast)
{
Wlast [tid] = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
}
}
//--------------------------------------------------------------------------
// finalize Cp, cumulative sum of Cp and compute Cp_kfirst
//--------------------------------------------------------------------------
GB_ek_slice_merge1 (Cp, Wfirst, Wlast, M_ek_slicing, M_ntasks) ;
GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
Wfirst, Wlast, M_ek_slicing, M_ntasks, M_nthreads, Context) ;
//--------------------------------------------------------------------------
// allocate C->i and C->x
//--------------------------------------------------------------------------
int64_t cnz = Cp [nvec] ;
// set C->iso = C_iso OK
GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, C_iso, Context)) ;
//--------------------------------------------------------------------------
// copy pattern into C
//--------------------------------------------------------------------------
// TODO: could make these components of C shallow instead
if (GB_IS_HYPERSPARSE (M))
{
// copy M->h into C->h
GB_memcpy (C->h, Mh, nvec * sizeof (int64_t), M_nthreads) ;
}
C->nvec = nvec ;
C->jumbled = M->jumbled ;
C->magic = GB_MAGIC ;
//--------------------------------------------------------------------------
// get the opcode
//--------------------------------------------------------------------------
GB_Opcode opcode = op->opcode ;
bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
bool op_is_first = (opcode == GB_FIRST_opcode) ;
bool op_is_second = (opcode == GB_SECOND_opcode) ;
bool op_is_pair = (opcode == GB_PAIR_opcode) ;
GB_Type_code ccode = ctype->code ;
//--------------------------------------------------------------------------
// check if the values of A and/or B are ignored
//--------------------------------------------------------------------------
// With C = ewisemult (A,B), only the intersection of A and B is used.
// If op is SECOND or PAIR, the values of A are never accessed.
// If op is FIRST or PAIR, the values of B are never accessed.
// If op is PAIR, the values of A and B are never accessed.
// Contrast with ewiseadd.
// A is passed as x, and B as y, in z = op(x,y)
bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
bool B_is_pattern = op_is_first || op_is_pair || op_is_positional ;
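// For example, with op = FIRST, z = op (x,y) = x, so B_is_pattern is true
// and only the pattern (not the values) of B is consulted; a positional op
// depends only on the entry position, so both A and B are pattern-only.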
//--------------------------------------------------------------------------
// using a built-in binary operator (except for positional operators)
//--------------------------------------------------------------------------
#define GB_PHASE_2_OF_2
if (C_iso)
{
//----------------------------------------------------------------------
// C is iso
//----------------------------------------------------------------------
// Cx [0] = cscalar = op (A,B)
GB_BURBLE_MATRIX (C, "(iso emult) ") ;
memcpy (C->x, cscalar, csize) ;
// pattern of C = set intersection of pattern of A and B
#define GB_ISO_EMULT
#include "GB_emult_04_template.c"
}
else
{
//----------------------------------------------------------------------
// C is non-iso
//----------------------------------------------------------------------
bool done = false ;
#ifndef GBCOMPACT
//------------------------------------------------------------------
// define the worker for the switch factory
//------------------------------------------------------------------
#define GB_AemultB_04(mult,xname) GB (_AemultB_04_ ## mult ## xname)
#define GB_BINOP_WORKER(mult,xname) \
{ \
info = GB_AemultB_04(mult,xname) (C, M, Mask_struct, A, B, \
Cp_kfirst, M_ek_slicing, M_ntasks, M_nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
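// For illustration: with a hypothetical mult of _plus and xname of _fp64,
// the token pasting above names the specialized kernel
// GB (_AemultB_04__plus_fp64).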
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
GB_Type_code xcode, ycode, zcode ;
if (!op_is_positional &&
GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
op, false, &opcode, &xcode, &ycode, &zcode) && ccode == zcode)
{
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
#endif
//----------------------------------------------------------------------
// generic worker
//----------------------------------------------------------------------
if (!done)
{
GB_BURBLE_MATRIX (C, "(generic emult_04: %s) ", op->name) ;
GB_ewise_generic (C, op, NULL, 0, 0,
NULL, NULL, NULL, C_sparsity, GB_EMULT_METHOD4, Cp_kfirst,
M_ek_slicing, M_ntasks, M_nthreads, NULL, 0, 0, NULL, 0, 0,
M, Mask_struct, false, A, B, Context) ;
}
}
//--------------------------------------------------------------------------
// remove empty vectors from C, if hypersparse
//--------------------------------------------------------------------------
GB_OK (GB_hypermatrix_prune (C, Context)) ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
ASSERT_MATRIX_OK (C, "C output for emult_04", GB0) ;
(*mask_applied) = true ;
return (GrB_SUCCESS) ;
}
|
DRB021-reductionmissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A kernel with a two-level parallelizable loop and a reduction:
if reduction(+:sum) is missing, there is a race condition.
Data race pairs: we allow multiple pairs to preserve the pattern.
sum@70:7 vs. sum@70:7
sum@70:7 vs. sum@70:13
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
omprace_init();
int i,j;
float temp, sum=0.0;
int len=100;
float u[100][100];
for (i = 0; i < len; i++)
for (j = 0; j < len; j++)
u[i][j] = 0.5;
#pragma omp parallel for private (temp,i,j)
for (i = 0; i < len; i++)
for (j = 0; j < len; j++)
{
temp = u[i][j];
sum = sum + temp * temp;
}
printf ("sum = %f\n", sum);
omprace_fini();
return 0;
}
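/* A minimal sketch (an addition, not part of the original benchmark) of the
race-free variant the header comment alludes to: reduction(+:sum) gives each
thread a private accumulator that is combined after the loop. */
static float sum_of_squares_fixed(int len, float u[100][100])
{
float sum = 0.0;
#pragma omp parallel for reduction(+:sum)
for (int i = 0; i < len; i++)
for (int j = 0; j < len; j++)
{
float temp = u[i][j];
sum = sum + temp * temp;
}
return sum;
}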
|
srpt_main.c | #include <math.h>
#include <stdio.h>   /* FILE, fopen, printf, snprintf */
#include <stdlib.h>  /* malloc, free, exit, system */
#include <string.h>  /* strcpy, strstr */
#include <omp.h>
#include <nlopt.h>   /* nlopt_create, nlopt_set_min_objective, ... */
#include "systems.h" /* assumed to provide dim, pdev, global_alg, local_alg,
                        maxeval, minrms, tol and gen_srpgeo */
typedef struct {
int ndat, idxmin, pardim;
double *e_ab;
char (*param_names)[10];
char (*param_atoms)[10];
} DATAFUNC;
double opt_me(unsigned pardim, const double *x, double *grad, void *func_data)
{
DATAFUNC *fd = (DATAFUNC *) func_data;
int ndat = fd->ndat;
int idxmin = fd->idxmin;
double e_srp[ndat];
double e_ab[ndat];
double energy;
double sumsq = 0.0;
double target;
char param_names[pardim][10];
char param_atoms[pardim][10];
for (unsigned i = 0; i < pardim; ++i) {
strcpy(param_names[i], fd->param_names[i]);
strcpy(param_atoms[i], fd->param_atoms[i]);
}
for (int i = 0; i < ndat; ++i) {
e_ab[i] = fd->e_ab[i];
}
FILE * fs;
fs = fopen("mopac_parameter", "w+");
if (!fs)
exit(EXIT_FAILURE);
for (unsigned i = 0; i < pardim; ++i) {
fprintf(fs, "%s %s %lf\n", param_names[i], param_atoms[i], x[i]);
}
fclose(fs);
#pragma omp parallel for
for (int i = 0; i < ndat; ++i) {
char callmop[0x100];
/* system() runs /bin/sh, where bash's "&>" is not portable */
snprintf(callmop, sizeof(callmop),
"/home/rpanades/bin/MOPACMINE/MOPAC2016.exe "
"./inp_semp/geo_%d.mop > /dev/null 2>&1", i);
int run = system(callmop);
}
for (int i = 0; i < ndat; ++i) {
energy = NAN;
char line[] = "TOTAL ENERGY";
char tmp[500];
char outfile[500];
FILE * ft;
snprintf(outfile, sizeof(outfile), "./inp_semp/geo_%d.out", i);
ft = fopen(outfile, "r");
if (!ft)
exit(EXIT_FAILURE);
while (fgets(tmp, 500, ft) != NULL) {
if ((strstr(tmp, line)) != NULL) {
sscanf(tmp, "%*s %*s %*s %lf %*s", &energy);
}
}
fclose(ft);
e_srp[i] = energy * 8.065544005e3;
}
double minesrp = e_srp[idxmin];
for (int i = 0; i < ndat; ++i) {
e_srp[i] -= minesrp;
sumsq += (e_srp[i] - e_ab[i]) * (e_srp[i] - e_ab[i]);
}
target = sqrt(sumsq / ndat);
FILE * fu;
fu = fopen("rms_values", "a");
fprintf(fu, "%lf\n", target);
fclose(fu);
FILE * fx;
fx = fopen("e_srp", "a");
for (int i = 0; i < ndat; ++i) {
fprintf(fx, "%lf\n", e_srp[i]);
}
fclose(fx);
return target;
}
int main(void)
{
// Input files processing and variable initialization
DATAFUNC func_data = {.ndat = 0, .pardim = 0};
int i = 0, ch = 0;
double ** data;
double mineab = HUGE_VAL;
FILE * fn;
fn = fopen("./inp_ab.txt", "r");
if (!fn)
exit(EXIT_FAILURE);
while ( (ch = fgetc(fn)) != EOF) {
if (ch == '\n') {
func_data.ndat++;
}
}
rewind(fn);
func_data.e_ab = malloc(func_data.ndat * sizeof(*func_data.e_ab));
data = (double **) malloc(func_data.ndat * sizeof(double*));
if (func_data.e_ab == NULL || data == NULL) {
printf("Error: memory not allocated\n");
exit(EXIT_FAILURE);
}
for (int i = 0; i < func_data.ndat; i++) {
data[i] = (double *) malloc(dim * sizeof(double));
}
for (int i = 0; i < func_data.ndat; ++i) {
for (int j = 0; j < dim; ++j) {
fscanf(fn, "%lf", &data[i][j]);
}
fscanf(fn, "%lf", &func_data.e_ab[i]);
if (func_data.e_ab[i] < mineab) {
mineab = func_data.e_ab[i];
func_data.idxmin = i;
}
}
fclose(fn);
for (i = 0; i < func_data.ndat; i++) {
func_data.e_ab[i] -= mineab;
}
gen_srpgeo(func_data.ndat, data);
FILE * fr;
fr = fopen("./parameter_ref", "r");
if (!fr)
exit(EXIT_FAILURE);
while ( (ch = fgetc(fr)) != EOF) {
if (ch == '\n') {
func_data.pardim++;
}
}
rewind(fr);
func_data.param_names = malloc(func_data.pardim * 10 * sizeof(char));
func_data.param_atoms = malloc(func_data.pardim * 10 * sizeof(char));
double param_values[func_data.pardim];
double value_upper[func_data.pardim];
double value_lower[func_data.pardim];
for (int i = 0; i < func_data.pardim; ++i) {
fscanf(fr, "%s %s %lf", func_data.param_names[i],
func_data.param_atoms[i], &param_values[i]);
double tmp = (param_values[i] >= 0 ? pdev : -pdev);
value_upper[i] = param_values[i] * (1.0 + tmp);
value_lower[i] = param_values[i] * (1.0 - tmp);
}
fclose(fr);
FILE * fv;
fv = fopen("e_ab", "w");
for (int i = 0; i < func_data.ndat; ++i) {
fprintf(fv,"%lf\n", func_data.e_ab[i]);
}
fclose(fv);
// Optimization process
double minf;
nlopt_opt opt = nlopt_create(global_alg, func_data.pardim);
nlopt_set_local_optimizer(opt, nlopt_create(local_alg,
func_data.pardim));
nlopt_set_lower_bounds(opt, value_lower);
nlopt_set_upper_bounds(opt, value_upper);
nlopt_set_min_objective(opt, opt_me, &func_data);
nlopt_set_maxeval(opt, maxeval);
nlopt_set_stopval(opt, minrms);
nlopt_set_ftol_abs(opt, tol);
int dbg = nlopt_optimize(opt, param_values, &minf);
if (dbg < 0) {
fprintf(stderr, "%s:%d %s -> Nlopt C function failed: %d expected: %d\n"
,__FILE__ , __LINE__, __FUNCTION__, dbg, NLOPT_SUCCESS);
} else {
printf("Found minimum %lf\n", minf);
printf("At this point:\n");
for (int i = 0; i < func_data.pardim; ++i) {
printf("%lf\n",param_values[i]);
}
}
// Cleaning up stuff
nlopt_destroy(opt);
for (i = 0; i < func_data.ndat; i++) {
free(data[i]);
}
free(data);
free(func_data.e_ab);
free(func_data.param_names);
free(func_data.param_atoms);
return 0;
}
|
vector.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_Vector class.
*
*****************************************************************************/
#include "seq_mv.h"
#include "_hypre_utilities.hpp" //RL: TODO vector_device.c, include cuda there
/*--------------------------------------------------------------------------
* hypre_SeqVectorCreate
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCreate( HYPRE_Int size )
{
hypre_Vector *vector;
vector = hypre_CTAlloc(hypre_Vector, 1, HYPRE_MEMORY_HOST);
hypre_VectorData(vector) = NULL;
hypre_VectorSize(vector) = size;
hypre_VectorNumVectors(vector) = 1;
hypre_VectorMultiVecStorageMethod(vector) = 0;
/* set defaults */
hypre_VectorOwnsData(vector) = 1;
hypre_VectorMemoryLocation(vector) = hypre_HandleMemoryLocation(hypre_handle());
return vector;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultiVectorCreate
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqMultiVectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
hypre_Vector *vector = hypre_SeqVectorCreate(size);
hypre_VectorNumVectors(vector) = num_vectors;
return vector;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorDestroy( hypre_Vector *vector )
{
HYPRE_Int ierr=0;
if (vector)
{
HYPRE_MemoryLocation memory_location = hypre_VectorMemoryLocation(vector);
if ( hypre_VectorOwnsData(vector) )
{
hypre_TFree(hypre_VectorData(vector), memory_location);
}
hypre_TFree(vector, HYPRE_MEMORY_HOST);
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorInitialize_v2( hypre_Vector *vector, HYPRE_MemoryLocation memory_location )
{
HYPRE_Int size = hypre_VectorSize(vector);
HYPRE_Int ierr = 0;
HYPRE_Int num_vectors = hypre_VectorNumVectors(vector);
HYPRE_Int multivec_storage_method = hypre_VectorMultiVecStorageMethod(vector);
hypre_VectorMemoryLocation(vector) = memory_location;
/* Caveat: for pre-existing data, the memory location must be guaranteed
* to be consistent with `memory_location'.
* Otherwise, mismatches will exist, and problems will be encountered
* when the data is used or freed */
if ( !hypre_VectorData(vector) )
{
hypre_VectorData(vector) = hypre_CTAlloc(HYPRE_Complex, num_vectors*size, memory_location);
}
if ( multivec_storage_method == 0 )
{
hypre_VectorVectorStride(vector) = size;
hypre_VectorIndexStride(vector) = 1;
}
else if ( multivec_storage_method == 1 )
{
hypre_VectorVectorStride(vector) = 1;
hypre_VectorIndexStride(vector) = num_vectors;
}
else
{
++ierr;
}
return ierr;
}
HYPRE_Int
hypre_SeqVectorInitialize( hypre_Vector *vector )
{
HYPRE_Int ierr;
ierr = hypre_SeqVectorInitialize_v2( vector, hypre_VectorMemoryLocation(vector) );
return ierr;
}
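/* A minimal usage sketch of the lifecycle defined above (n is a
 * hypothetical size):
 *
 *    hypre_Vector *v = hypre_SeqVectorCreate(n);
 *    hypre_SeqVectorInitialize(v);
 *    ... read or write hypre_VectorData(v) ...
 *    hypre_SeqVectorDestroy(v);
 */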
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetDataOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetDataOwner( hypre_Vector *vector,
HYPRE_Int owns_data )
{
HYPRE_Int ierr=0;
hypre_VectorOwnsData(vector) = owns_data;
return ierr;
}
/*--------------------------------------------------------------------------
* ReadVector
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorRead( char *file_name )
{
hypre_Vector *vector;
FILE *fp;
HYPRE_Complex *data;
HYPRE_Int size;
HYPRE_Int j;
/*----------------------------------------------------------
* Read in the data
*----------------------------------------------------------*/
fp = fopen(file_name, "r");
hypre_fscanf(fp, "%d", &size);
vector = hypre_SeqVectorCreate(size);
hypre_VectorMemoryLocation(vector) = HYPRE_MEMORY_HOST;
hypre_SeqVectorInitialize(vector);
data = hypre_VectorData(vector);
for (j = 0; j < size; j++)
{
hypre_fscanf(fp, "%le", &data[j]);
}
fclose(fp);
/* multivector code not written yet */
hypre_assert( hypre_VectorNumVectors(vector) == 1 );
return vector;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorPrint
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorPrint( hypre_Vector *vector,
char *file_name )
{
FILE *fp;
HYPRE_Complex *data;
HYPRE_Int size, num_vectors, vecstride, idxstride;
HYPRE_Int i, j;
HYPRE_Complex value;
HYPRE_Int ierr = 0;
num_vectors = hypre_VectorNumVectors(vector);
vecstride = hypre_VectorVectorStride(vector);
idxstride = hypre_VectorIndexStride(vector);
/*----------------------------------------------------------
* Print the data
*----------------------------------------------------------*/
data = hypre_VectorData(vector);
size = hypre_VectorSize(vector);
fp = fopen(file_name, "w");
if ( hypre_VectorNumVectors(vector) == 1 )
{
hypre_fprintf(fp, "%d\n", size);
}
else
{
hypre_fprintf(fp, "%d vectors of size %d\n", num_vectors, size );
}
if ( num_vectors>1 )
{
for ( j=0; j<num_vectors; ++j )
{
hypre_fprintf(fp, "vector %d\n", j );
for (i = 0; i < size; i++)
{
value = data[ j*vecstride + i*idxstride ];
#ifdef HYPRE_COMPLEX
hypre_fprintf(fp, "%.14e , %.14e\n",
hypre_creal(value), hypre_cimag(value));
#else
hypre_fprintf(fp, "%.14e\n", value);
#endif
}
}
}
else
{
for (i = 0; i < size; i++)
{
#ifdef HYPRE_COMPLEX
hypre_fprintf(fp, "%.14e , %.14e\n",
hypre_creal(data[i]), hypre_cimag(data[i]));
#else
hypre_fprintf(fp, "%.14e\n", data[i]);
#endif
}
}
fclose(fp);
return ierr;
}
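/* Editor's round-trip sketch for the read/print pair above (illustrative,
not compiled; helper name is hypothetical). File layout: a leading size
line, then one value per line; multivectors get a "<n> vectors of size <m>"
header and per-vector blocks. */
#if 0
static void example_seq_vector_io(void)
{
hypre_Vector *v = hypre_SeqVectorCreate(4);
hypre_SeqVectorInitialize(v);
hypre_SeqVectorSetConstantValues(v, 2.5);
hypre_SeqVectorPrint(v, "vec.out"); /* writes "4", then four values */
hypre_Vector *w = hypre_SeqVectorRead("vec.out");
hypre_SeqVectorDestroy(v);
hypre_SeqVectorDestroy(w);
}
#endif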
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetConstantValues
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetConstantValues( hypre_Vector *v,
HYPRE_Complex value )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *vector_data = hypre_VectorData(v);
HYPRE_Int size = hypre_VectorSize(v);
HYPRE_Int ierr = 0;
size *= hypre_VectorNumVectors(v);
//hypre_SeqVectorPrefetch(v, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
if (size > 0)
{
HYPRE_THRUST_CALL( fill_n, vector_data, size, value );
}
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(vector_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
vector_data[i] = value;
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetRandomValues
*
* fills the vector with values randomly distributed between -1.0 and +1.0
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetRandomValues( hypre_Vector *v,
HYPRE_Int seed )
{
HYPRE_Complex *vector_data = hypre_VectorData(v);
HYPRE_Int size = hypre_VectorSize(v);
HYPRE_Int i;
HYPRE_Int ierr = 0;
hypre_SeedRand(seed);
size *= hypre_VectorNumVectors(v);
if (hypre_GetActualMemLocation(hypre_VectorMemoryLocation(v)) == hypre_MEMORY_HOST)
{
/* RDF: threading this loop may cause problems because of hypre_Rand() */
for (i = 0; i < size; i++)
{
vector_data[i] = 2.0 * hypre_Rand() - 1.0;
}
}
else
{
HYPRE_Complex *h_data = hypre_TAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
for (i = 0; i < size; i++)
{
h_data[i] = 2.0 * hypre_Rand() - 1.0;
}
hypre_TMemcpy(vector_data, h_data, HYPRE_Complex, size, hypre_VectorMemoryLocation(v), HYPRE_MEMORY_HOST);
hypre_TFree(h_data, HYPRE_MEMORY_HOST);
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCopy
* Copies data from x to y.  If x is larger than y, only the first
* size(y) elements of x are copied to y.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorCopy( hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Int ierr = 0;
size_t size = hypre_min( hypre_VectorSize(x), hypre_VectorSize(y) ) * hypre_VectorNumVectors(x);
hypre_TMemcpy( hypre_VectorData(y),
hypre_VectorData(x),
HYPRE_Complex,
size,
hypre_VectorMemoryLocation(y),
hypre_VectorMemoryLocation(x) );
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCloneDeep
* Returns a complete copy of x - a deep copy, with its own copy of the data.
*--------------------------------------------------------------------------*/
hypre_Vector*
hypre_SeqVectorCloneDeep_v2( hypre_Vector *x, HYPRE_MemoryLocation memory_location )
{
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
hypre_Vector *y = hypre_SeqMultiVectorCreate( size, num_vectors );
hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);
hypre_SeqVectorInitialize_v2(y, memory_location);
hypre_SeqVectorCopy( x, y );
return y;
}
hypre_Vector*
hypre_SeqVectorCloneDeep( hypre_Vector *x )
{
return hypre_SeqVectorCloneDeep_v2(x, hypre_VectorMemoryLocation(x));
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCloneShallow
* Returns a shallow copy of x: the clone points at x's data and does not own it
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCloneShallow( hypre_Vector *x )
{
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors );
hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x);
hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x);
hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x);
hypre_VectorMemoryLocation(y) = hypre_VectorMemoryLocation(x);
hypre_VectorData(y) = hypre_VectorData(x);
hypre_SeqVectorSetDataOwner( y, 0 );
hypre_SeqVectorInitialize(y);
return y;
}
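/* Editor's aliasing sketch for the shallow clone above (illustrative, not
compiled; assumes host-resident data and a hypothetical helper name). The
clone shares x's buffer, so writes through either handle are visible
through both, and destroying the clone must not free the shared data. */
#if 0
static void example_shallow_clone(hypre_Vector *x)
{
hypre_Vector *y = hypre_SeqVectorCloneShallow(x);
hypre_VectorData(y)[0] = 3.0; /* also changes hypre_VectorData(x)[0] */
hypre_SeqVectorDestroy(y); /* owns_data == 0: frees the struct only */
}
#endif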
/*--------------------------------------------------------------------------
* hypre_SeqVectorScale
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorScale( HYPRE_Complex alpha,
hypre_Vector *y )
{
/* special cases */
if (alpha == 1.0)
{
return 0;
}
if (alpha == 0.0)
{
return hypre_SeqVectorSetConstantValues(y, 0.0);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(y);
HYPRE_Int ierr = 0;
size *= hypre_VectorNumVectors(y);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
HYPRE_CUBLAS_CALL( cublasDscal(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, y_data, 1) );
#else
HYPRE_THRUST_CALL( transform, y_data, y_data + size, y_data, alpha * _1 );
#endif
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] *= alpha;
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorAxpy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorAxpy( HYPRE_Complex alpha,
hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int ierr = 0;
size *= hypre_VectorNumVectors(x);
//hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
HYPRE_CUBLAS_CALL( cublasDaxpy(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, x_data, 1, y_data, 1) );
#else
HYPRE_THRUST_CALL( transform, x_data, x_data + size, y_data, y_data, alpha * _1 + _2 );
#endif
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha * x_data[i];
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInnerProd
*--------------------------------------------------------------------------*/
HYPRE_Real
hypre_SeqVectorInnerProd( hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Real result = 0.0;
size *= hypre_VectorNumVectors(x);
//hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#ifndef HYPRE_COMPLEX
#if defined(HYPRE_USING_CUBLAS)
HYPRE_CUBLAS_CALL( cublasDdot(hypre_HandleCublasHandle(hypre_handle()), size, x_data, 1, y_data, 1, &result) );
#else
result = HYPRE_THRUST_CALL( inner_product, x_data, x_data + size, y_data, 0.0 );
#endif
#else
/* TODO */
#error "Complex inner product"
#endif
#else /* #if defined(HYPRE_USING_CUDA) */
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) reduction(+:result) is_device_ptr(y_data,x_data) map(result)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
result += hypre_conj(y_data[i]) * x_data[i];
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return result;
}
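/* Editor's sketch of the reduction above (illustrative, not compiled;
helper name is hypothetical): the host path computes
sum_i conj(y[i]) * x[i]; for real builds this is the ordinary dot product. */
#if 0
static void example_inner_prod(void)
{
hypre_Vector *x = hypre_SeqVectorCreate(3);
hypre_Vector *y = hypre_SeqVectorCreate(3);
hypre_SeqVectorInitialize(x);
hypre_SeqVectorInitialize(y);
hypre_SeqVectorSetConstantValues(x, 2.0);
hypre_SeqVectorSetConstantValues(y, 3.0);
HYPRE_Real d = hypre_SeqVectorInnerProd(x, y); /* 3 * (2.0*3.0) = 18 */
hypre_SeqVectorDestroy(x);
hypre_SeqVectorDestroy(y);
}
#endif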
//TODO
/*--------------------------------------------------------------------------
* hypre_SeqVectorSumElts:
* Returns the sum of all vector elements.
*--------------------------------------------------------------------------*/
HYPRE_Complex hypre_SeqVectorSumElts( hypre_Vector *vector )
{
HYPRE_Complex sum = 0;
HYPRE_Complex *data = hypre_VectorData( vector );
HYPRE_Int size = hypre_VectorSize( vector );
HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
for ( i=0; i<size; ++i ) sum += data[i];
return sum;
}
HYPRE_Int
hypre_SeqVectorPrefetch( hypre_Vector *x, HYPRE_MemoryLocation memory_location)
{
HYPRE_Int ierr = 0;
#ifdef HYPRE_USING_UNIFIED_MEMORY
if (hypre_VectorMemoryLocation(x) != HYPRE_MEMORY_DEVICE)
{
/* hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! CUDA Prefetch with non-unified memory\n");*/
return 1;
}
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Int size = hypre_VectorSize(x) * hypre_VectorNumVectors(x);
if (size == 0)
{
return ierr;
}
hypre_MemPrefetch(x_data, sizeof(HYPRE_Complex)*size, memory_location);
#endif
return ierr;
}
#if 0
/* y[i] = max(alpha*x[i], beta*y[i]) */
HYPRE_Int
hypre_SeqVectorMax( HYPRE_Complex alpha,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int ierr = 0;
size *= hypre_VectorNumVectors(x);
//hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
thrust::maximum<HYPRE_Complex> mx;
#if defined(HYPRE_USING_CUDA)
HYPRE_THRUST_CALL( transform,
thrust::make_transform_iterator(x_data, alpha * _1),
thrust::make_transform_iterator(x_data + size, alpha * _1),
thrust::make_transform_iterator(y_data, beta * _1),
y_data,
mx );
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] = hypre_max(alpha * x_data[i], beta * y_data[i]); /* '=' not '+=', per the semantics above */
}
#endif /* defined(HYPRE_USING_CUDA) */
hypre_SyncCudaComputeStream(hypre_handle());
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
#endif
|
hypre_memory.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* Memory management utilities
*
*****************************************************************************/
#include "_hypre_utilities.h"
#include "../struct_mv/_hypre_struct_mv.h"
#ifdef HYPRE_USE_UMALLOC
#undef HYPRE_USE_UMALLOC
#endif
/* global variables for device OpenMP */
#if defined(HYPRE_USING_DEVICE_OPENMP)
HYPRE_Int hypre__global_offload = 0;
HYPRE_Int hypre__offload_device_num;
HYPRE_Int hypre__offload_host_num;
/* stats */
size_t hypre__target_allc_count = 0;
size_t hypre__target_free_count = 0;
size_t hypre__target_allc_bytes = 0;
size_t hypre__target_free_bytes = 0;
size_t hypre__target_htod_count = 0;
size_t hypre__target_dtoh_count = 0;
size_t hypre__target_htod_bytes = 0;
size_t hypre__target_dtoh_bytes = 0;
#endif
/******************************************************************************
*
* Helper routines
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* hypre_RedefMemLocation
* Redefine location based on the selected memory model in hypre_memory.h
*--------------------------------------------------------------------------*/
static inline HYPRE_Int hypre_RedefMemLocation(HYPRE_Int location)
{
if (location == HYPRE_MEMORY_HOST)
{
return HYPRE_MEMORY_HOST_ACT;
}
if (location == HYPRE_MEMORY_DEVICE)
{
return HYPRE_MEMORY_DEVICE_ACT;
}
if (location == HYPRE_MEMORY_SHARED)
{
return HYPRE_MEMORY_SHARED_ACT;
}
if (location == HYPRE_MEMORY_HOST_PINNED)
{
return HYPRE_MEMORY_HOST_PINNED_ACT;
}
return HYPRE_MEMORY_UNSET;
}
/*--------------------------------------------------------------------------
* hypre_OutOfMemory
*--------------------------------------------------------------------------*/
static inline void
hypre_OutOfMemory(size_t size)
{
hypre_error_w_msg(HYPRE_ERROR_MEMORY,"Out of memory trying to allocate too many bytes\n");
fflush(stdout);
}
static inline void
hypre_WrongMemoryLocation()
{
hypre_error_w_msg(HYPRE_ERROR_MEMORY,"Wrong HYPRE MEMORY location: \n Only HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_SHARED,\n and HYPRE_MEMORY_HOST_PINNED are supported!\n");
fflush(stdout);
}
/*--------------------------------------------------------------------------
* hypre_GetPadMemsize:
* Device/Shared/HostPinned mallocs store the allocation size in bytes in a
* size_t pad placed immediately before the returned pointer
*--------------------------------------------------------------------------*/
static inline size_t
hypre_GetPadMemsize(void *ptr, HYPRE_Int location)
{
location = hypre_RedefMemLocation(location);
/* no stored size for host memory */
if (location == HYPRE_MEMORY_HOST)
{
return 0;
}
size_t *sp = (size_t*) ptr - HYPRE_MEM_PAD_LEN;
if (location == HYPRE_MEMORY_DEVICE)
{
/* special case for mapped device openmp; size available on host memory */
#if defined(HYPRE_DEVICE_OPENMP_MAPPED)
return *sp;
#else
/* copy size from device memory */
size_t size;
hypre_Memcpy(&size, sp, sizeof(size_t), HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
return size;
#endif
}
if (location == HYPRE_MEMORY_SHARED)
{
return *sp;
}
if (location == HYPRE_MEMORY_HOST_PINNED)
{
return *sp;
}
hypre_WrongMemoryLocation();
/* no stored size for host memory */
return 0;
}
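/* Editor's layout sketch for the size pad read above (illustrative, not
compiled; helper name is hypothetical). The allocators below reserve
HYPRE_MEM_PAD_LEN leading size_t slots, store the byte count in slot 0,
and return a pointer past the pad:

[ size_t size | pad ... | user data .......... ]
^-- pointer handed to the caller
*/
#if 0
static void example_pad_layout(void)
{
size_t size = 256;
size_t *raw = (size_t *) malloc(size + sizeof(size_t) * HYPRE_MEM_PAD_LEN);
raw[0] = size; /* stored byte count */
void *user = (void *) &raw[HYPRE_MEM_PAD_LEN];
size_t back = *((size_t *) user - HYPRE_MEM_PAD_LEN); /* back == 256 */
free(raw);
(void) back;
}
#endif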
/******************************************************************************
*
* Standard routines
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* hypre_MAlloc
*--------------------------------------------------------------------------*/
static inline void *
hypre_HostMalloc(size_t size, HYPRE_Int zeroinit)
{
void *ptr = NULL;
if (zeroinit)
{
ptr = calloc(size, 1);
}
else
{
ptr = malloc(size);
}
return ptr;
}
static inline void *
hypre_DeviceMalloc(size_t size, HYPRE_Int zeroinit)
{
void *ptr = NULL;
/* without UM, device alloc */
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
/* omp target alloc */
ptr = omp_target_alloc(size + sizeof(size_t)*HYPRE_MEM_PAD_LEN, hypre__offload_device_num);
size_t *sp = (size_t*) ptr;
#pragma omp target is_device_ptr(sp)
{
sp[0] = size;
}
ptr = (void*) (&sp[HYPRE_MEM_PAD_LEN]);
#elif defined(HYPRE_USING_DEVICE_OPENMP)
/* omp target map */
ptr = malloc(size + sizeof(size_t)*HYPRE_MEM_PAD_LEN);
size_t *sp = (size_t*) ptr;
sp[0] = size;
ptr = (void *) (&sp[HYPRE_MEM_PAD_LEN]);
HYPRE_OMPOffload(hypre__offload_device_num, ptr, size, "enter", "alloc");
#elif defined(HYPRE_USING_CUDA)
/* cudaMalloc */
hypre_CheckErrorDevice( cudaMalloc(&ptr, size + sizeof(size_t)*HYPRE_MEM_PAD_LEN) );
hypre_CheckErrorDevice( cudaDeviceSynchronize() );
hypre_Memcpy(ptr, &size, sizeof(size_t), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
size_t *sp = (size_t*) ptr;
ptr = (void*) (&sp[HYPRE_MEM_PAD_LEN]);
#endif
/* after device alloc, memset to 0 */
if (ptr && zeroinit)
{
hypre_Memset(ptr, 0, size, HYPRE_MEMORY_DEVICE);
}
return ptr;
}
static inline void *
hypre_UnifiedMalloc(size_t size, HYPRE_Int zeroinit)
{
void *ptr = NULL;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
size_t count = size + sizeof(size_t)*HYPRE_MEM_PAD_LEN;
/* with UM, managed memory alloc */
hypre_CheckErrorDevice( cudaMallocManaged(&ptr, count, CUDAMEMATTACHTYPE) );
hypre_CheckErrorDevice( cudaMemAdvise(ptr, count, cudaMemAdviseSetPreferredLocation, HYPRE_DEVICE) );
size_t *sp = (size_t*) ptr;
sp[0] = size;
ptr = (void*) (&sp[HYPRE_MEM_PAD_LEN]);
/* after UM alloc, memset to 0 */
if (zeroinit)
{
hypre_Memset(ptr, 0, size, HYPRE_MEMORY_SHARED);
}
#endif
return ptr;
}
static inline void *
hypre_HostPinnedMalloc(size_t size, HYPRE_Int zeroinit)
{
void *ptr = NULL;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
/* TODO which one of the following two? */
/* hypre_CheckErrorDevice( cudaHostAlloc(&ptr,size + sizeof(size_t)*HYPRE_MEM_PAD_LEN,
cudaHostAllocMapped)); */
hypre_CheckErrorDevice( cudaMallocHost(&ptr, size + sizeof(size_t)*HYPRE_MEM_PAD_LEN) );
size_t *sp = (size_t*) ptr;
sp[0] = size;
ptr = (void*) (&sp[HYPRE_MEM_PAD_LEN]);
/* after host alloc, memset to 0 */
if (zeroinit)
{
hypre_Memset(ptr, 0, size, HYPRE_MEMORY_HOST_PINNED);
}
#endif
return ptr;
}
static inline void *
hypre_MAlloc_core(size_t size, HYPRE_Int zeroinit, HYPRE_Int location)
{
if (size == 0)
{
return NULL;
}
void *ptr = NULL;
location = hypre_RedefMemLocation(location);
switch (location)
{
case HYPRE_MEMORY_HOST :
/* ask for cpu memory */
ptr = hypre_HostMalloc(size, zeroinit);
break;
case HYPRE_MEMORY_DEVICE :
/* ask for device memory */
ptr = hypre_DeviceMalloc(size, zeroinit);
break;
case HYPRE_MEMORY_SHARED :
/* ask for unified memory */
ptr = hypre_UnifiedMalloc(size, zeroinit);
break;
case HYPRE_MEMORY_HOST_PINNED :
/* ask for page-locked memory on the host */
ptr = hypre_HostPinnedMalloc(size, zeroinit);
break;
default :
/* unrecognized location */
hypre_WrongMemoryLocation();
}
if (!ptr)
{
hypre_OutOfMemory(size);
exit(1); /* nonzero exit: allocation failed */
}
return ptr;
}
void *
hypre_MAlloc(size_t size, HYPRE_Int location)
{
return hypre_MAlloc_core(size, 0, location);
}
void *
hypre_CAlloc( size_t count, size_t elt_size, HYPRE_Int location)
{
return hypre_MAlloc_core(count * elt_size, 1, location);
}
/*--------------------------------------------------------------------------
* hypre_Free
*--------------------------------------------------------------------------*/
static inline void
hypre_HostFree(void *ptr)
{
free(ptr);
}
static inline void
hypre_DeviceFree(void *ptr)
{
/* without UM, device free */
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
size_t *sp = (size_t *) ptr;
ptr = (void *) (&sp[-HYPRE_MEM_PAD_LEN]);
omp_target_free(ptr, hypre__offload_device_num);
#elif defined(HYPRE_USING_DEVICE_OPENMP)
size_t size = ((size_t *) ptr)[-HYPRE_MEM_PAD_LEN];
HYPRE_OMPOffload(hypre__offload_device_num, ptr, size, "exit", "delete");
#elif defined(HYPRE_USING_CUDA)
/* cudaFree((size_t *) ptr - HYPRE_MEM_PAD_LEN); */
cudaSafeFree(ptr, HYPRE_MEM_PAD_LEN);
#endif
}
static inline void
hypre_UnifiedFree(void *ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
/* with UM, managed memory free */
/* cudaFree((size_t *) ptr - HYPRE_MEM_PAD_LEN); */
cudaSafeFree(ptr, HYPRE_MEM_PAD_LEN);
#endif
}
static inline void
hypre_HostPinnedFree(void *ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
/* page-locked memory on the host */
/* cudaFreeHost((size_t *) ptr - HYPRE_MEM_PAD_LEN); */
cudaSafeFree(ptr, HYPRE_MEM_PAD_LEN);
#endif
}
void
hypre_Free(void *ptr, HYPRE_Int location)
{
if (!ptr)
{
return;
}
location = hypre_RedefMemLocation(location);
switch (location)
{
case HYPRE_MEMORY_HOST :
/* free cpu memory */
hypre_HostFree(ptr);
break;
case HYPRE_MEMORY_DEVICE :
/* free device memory */
hypre_DeviceFree(ptr);
break;
case HYPRE_MEMORY_SHARED :
/* free unified memory */
hypre_UnifiedFree(ptr);
break;
case HYPRE_MEMORY_HOST_PINNED :
/* free host page-locked memory */
hypre_HostPinnedFree(ptr);
break;
default :
/* unrecognized location */
hypre_WrongMemoryLocation();
}
}
/*--------------------------------------------------------------------------
* hypre_ReAlloc
*--------------------------------------------------------------------------*/
static inline void *
hypre_HostReAlloc(void *ptr, size_t size)
{
return realloc(ptr, size);
}
static inline void *
hypre_Device_Unified_HostPinned_ReAlloc(void *ptr, size_t size, HYPRE_Int location)
{
/* device/unified/hostpinned memory realloc: malloc+copy+free */
void *new_ptr = hypre_MAlloc(size, location);
size_t old_size = hypre_GetPadMemsize(ptr, location);
size_t smaller_size = size > old_size ? old_size : size;
hypre_Memcpy(new_ptr, ptr, smaller_size, location, location);
hypre_Free(ptr, location);
return new_ptr;
}
void *
hypre_ReAlloc(void *ptr, size_t size, HYPRE_Int location)
{
location = hypre_RedefMemLocation(location);
if (size == 0)
{
hypre_Free(ptr, location);
return NULL;
}
if (ptr == NULL)
{
return hypre_MAlloc(size, location);
}
switch (location)
{
case HYPRE_MEMORY_HOST :
/* realloc cpu memory */
ptr = hypre_HostReAlloc(ptr, size);
break;
case HYPRE_MEMORY_DEVICE :
/* realloc device memory */
case HYPRE_MEMORY_SHARED :
/* realloc unified memory */
case HYPRE_MEMORY_HOST_PINNED :
/* realloc host pinned memory */
ptr = hypre_Device_Unified_HostPinned_ReAlloc(ptr, size, location);
break;
default :
/* unrecognized location */
hypre_WrongMemoryLocation();
}
if (!ptr)
{
hypre_OutOfMemory(size);
}
return ptr;
}
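/* Editor's usage sketch for the realloc above (illustrative, not compiled;
helper name is hypothetical): for device/unified/pinned memory the resize
is malloc+copy+free, with the copy length taken from the stored pad size. */
#if 0
static void example_realloc(void)
{
HYPRE_Int *p = (HYPRE_Int *) hypre_MAlloc(10 * sizeof(HYPRE_Int), HYPRE_MEMORY_HOST);
p = (HYPRE_Int *) hypre_ReAlloc(p, 20 * sizeof(HYPRE_Int), HYPRE_MEMORY_HOST);
hypre_Free(p, HYPRE_MEMORY_HOST);
}
#endif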
/*--------------------------------------------------------------------------
* hypre_Memcpy
*--------------------------------------------------------------------------*/
void
hypre_Memcpy(void *dst, void *src, size_t size, HYPRE_Int loc_dst, HYPRE_Int loc_src)
{
if (dst == NULL || src == NULL)
{
return;
}
loc_dst = hypre_RedefMemLocation(loc_dst);
loc_src = hypre_RedefMemLocation(loc_src);
/* 4 x 4 = 16 cases = 9 + 2 + 2 + 2 + 1 */
/* 9: Host <-- Host, Host <-- Shared, Host <-- Pinned,
* Shared <-- Host, Shared <-- Shared, Shared <-- Pinned,
* Pinned <-- Host, Pinned <-- Shared, Pinned <-- Pinned.
* (i.e, without Device involved)
*/
if (loc_dst != HYPRE_MEMORY_DEVICE && loc_src != HYPRE_MEMORY_DEVICE)
{
memcpy(dst, src, size);
return;
}
/* 2: Shared <-- Device, Device <-- Shared */
if (loc_dst == HYPRE_MEMORY_SHARED || loc_src == HYPRE_MEMORY_SHARED)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
cudaMemcpy(dst, src, size, cudaMemcpyDeviceToDevice);
#endif
return;
}
/* 2: Device <-- Host, Device <-- Pinned */
if ( loc_dst == HYPRE_MEMORY_DEVICE && (loc_src == HYPRE_MEMORY_HOST || loc_src == HYPRE_MEMORY_HOST_PINNED) )
{
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
omp_target_memcpy(dst, src, size, 0, 0, hypre__offload_device_num, hypre__offload_host_num);
#elif defined(HYPRE_USING_DEVICE_OPENMP)
memcpy(dst, src, size);
HYPRE_OMPOffload(hypre__offload_device_num, dst, size, "update", "to");
#elif defined(HYPRE_USING_CUDA)
cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice);
#endif
return;
}
/* 2: Host <-- Device, Pinned <-- Device */
if ( (loc_dst == HYPRE_MEMORY_HOST || loc_dst == HYPRE_MEMORY_HOST_PINNED) && loc_src == HYPRE_MEMORY_DEVICE )
{
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
omp_target_memcpy(dst, src, size, 0, 0, hypre__offload_host_num, hypre__offload_device_num);
#elif defined(HYPRE_USING_DEVICE_OPENMP)
HYPRE_OMPOffload(hypre__offload_device_num, src, size, "update", "from");
memcpy(dst, src, size);
#elif defined(HYPRE_USING_CUDA)
cudaMemcpy( dst, src, size, cudaMemcpyDeviceToHost);
#endif
return;
}
/* 1: Device <-- Device */
if (loc_dst == HYPRE_MEMORY_DEVICE && loc_src == HYPRE_MEMORY_DEVICE)
{
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
omp_target_memcpy(dst, src, size, 0, 0, hypre__offload_device_num, hypre__offload_device_num);
#elif defined(HYPRE_USING_DEVICE_OPENMP)
HYPRE_OMPOffload(hypre__offload_device_num, src, size, "update", "from");
memcpy(dst, src, size);
HYPRE_OMPOffload(hypre__offload_device_num, dst, size, "update", "to");
#elif defined(HYPRE_USING_CUDA)
cudaMemcpy(dst, src, size, cudaMemcpyDeviceToDevice);
#endif
return;
}
hypre_WrongMemoryLocation();
}
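/* Editor's direction sketch for the dispatch above (illustrative, not
compiled; assumes a CUDA build and a hypothetical helper name): the
(dst, src) location pair selects plain memcpy, cudaMemcpy, or an OpenMP
offload update. */
#if 0
static void example_memcpy(void)
{
double h[4] = {1.0, 2.0, 3.0, 4.0};
double *d = (double *) hypre_MAlloc(4 * sizeof(double), HYPRE_MEMORY_DEVICE);
hypre_Memcpy(d, h, 4 * sizeof(double), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_Memcpy(h, d, 4 * sizeof(double), HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
hypre_Free(d, HYPRE_MEMORY_DEVICE);
}
#endif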
/*--------------------------------------------------------------------------
* hypre_Memset
* "Sets the first num bytes of the block of memory pointed by ptr to the specified value
* (*** interpreted as an unsigned char ***)"
*--------------------------------------------------------------------------*/
void *
hypre_Memset(void *ptr, HYPRE_Int value, size_t num, HYPRE_Int location)
{
if (ptr == NULL || num == 0)
{
return ptr;
}
location = hypre_RedefMemLocation(location);
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
unsigned char *ucptr = (unsigned char *) ptr;
unsigned char ucvalue = (unsigned char) value;
#endif
switch (location)
{
case HYPRE_MEMORY_HOST :
/* memset cpu memory */
case HYPRE_MEMORY_HOST_PINNED :
/* memset host pinned memory */
memset(ptr, value, num);
break;
case HYPRE_MEMORY_DEVICE :
/* memset device memory */
#if defined(HYPRE_DEVICE_OPENMP_ALLOC)
#define DEVICE_VAR is_device_ptr(ucptr)
hypre_LoopBegin(num, k)
{
ucptr[k] = ucvalue;
}
hypre_LoopEnd()
#undef DEVICE_VAR
#elif defined(HYPRE_USING_DEVICE_OPENMP)
memset(ptr, value, num);
HYPRE_OMPOffload(hypre__offload_device_num, ptr, num, "update", "to");
#elif defined(HYPRE_USING_CUDA)
cudaMemset(ptr, value, num);
#endif
break;
case HYPRE_MEMORY_SHARED :
/* memset unified memory */
memset(ptr, value, num);
break;
default :
/* unrecognized location */
hypre_WrongMemoryLocation();
}
return ptr;
}
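/* Editor's caveat sketch for the byte semantics quoted above (illustrative,
not compiled; helper name is hypothetical): memset writes bytes, so it can
zero an integer array but cannot set the integers to 1. */
#if 0
static void example_memset(void)
{
HYPRE_Int a[4];
hypre_Memset(a, 0, 4 * sizeof(HYPRE_Int), HYPRE_MEMORY_HOST); /* all zeros */
hypre_Memset(a, 1, 4 * sizeof(HYPRE_Int), HYPRE_MEMORY_HOST); /* each int is 0x01010101, not 1 */
}
#endif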
|
VolumetricAveragePooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricAveragePooling.c"
#else
static inline void THNN_(VolumetricAveragePooling_shapeCheck)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int padT,
int padW,
int padH,
bool ceil_mode)
{
int64_t nslices;
int64_t itime;
int64_t iheight;
int64_t iwidth;
int64_t otime;
int64_t oheight;
int64_t owidth;
int ndim = input->nDimension;
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
if (input->nDimension == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
THArgCheck(kT > 0 && kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kT: %d kH: %d kW: %d",
kT, kH, kW);
THArgCheck(dT > 0 && dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dT: %d dH: %d dW: %d",
dT, dH, dW);
THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
"4D or 5D (batch mode) tensor expected for input, but got: %s");
THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH
&& input->size[dimt] >= kT, 2,
"input image (T: %d H: %d W: %d) smaller than "
"kernel size (kT: %d kH: %d kW: %d)",
input->size[dimt], input->size[dimh], input->size[dimw],
kT, kH, kW);
// The second argument to THArgCheck is the argument number; 11 here is the index of padH.
THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 11,
"pad should not be greater than half of kernel size, but got "
"padT = %d, padW = %d, padH = %d, kT = %d, kW = %d, kH = %d",
padT, padW, padH, kT, kW, kH);
/* sizes */
nslices = input->size[dimN];
itime = input->size[dimt];
iheight = input->size[dimh];
iwidth = input->size[dimw];
if (ceil_mode) {
otime = (int64_t)(ceil((float)(itime - kT + 2*padT) / dT)) + 1;
oheight = (int64_t)(ceil((float)(iheight - kH + 2*padH) / dH)) + 1;
owidth = (int64_t)(ceil((float)(iwidth - kW + 2*padW) / dW)) + 1;
}
else
{
otime = (int64_t)(floor((float)(itime - kT + 2*padT) / dT)) + 1;
oheight = (int64_t)(floor((float)(iheight - kH + 2*padH) / dH)) + 1;
owidth = (int64_t)(floor((float)(iwidth - kW + 2*padW) / dW)) + 1;
}
if (padT || padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((otime - 1)*dT >= itime + padT)
--otime;
if ((oheight - 1)*dH >= iheight + padH)
--oheight;
if ((owidth - 1)*dW >= iwidth + padW)
--owidth;
}
if (otime < 1 || owidth < 1 || oheight < 1)
THError("Given input size: (%dx%dx%dx%d). "
"Calculated output size: (%dx%dx%dx%d). Output size is too small",
nslices,itime,iheight,iwidth,nslices,otime,oheight,owidth);
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth);
}
}
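// Editor's worked example of the output-size formula above: with
// itime = 10, kT = 3, dT = 2, padT = 1, floor mode gives
// otime = floor((10 - 3 + 2*1) / 2) + 1 = 5, while ceil mode gives
// ceil(9 / 2) + 1 = 6; the pad adjustment then shrinks the output extent
// whenever the last pooling window would start beyond the padded image.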
static void THNN_(VolumetricAveragePooling_updateOutput_frame)(
real *input_p,
real *output_p,
int64_t nslices,
int64_t itime,
int64_t iwidth,
int64_t iheight,
int64_t otime,
int64_t owidth,
int64_t oheight,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int padT,
int padW,
int padH,
bool count_include_pad)
{
int64_t k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
int64_t i, j, ti;
/* local pointers. */
real *ip = input_p + k * itime * iwidth * iheight;
real *op = output_p + k * otime * owidth * oheight;
for (i = 0; i < otime * oheight * owidth; ++i)
*(op + i) = 0;
/* loop over output */
for (ti = 0; ti < otime; ti++)
{
for (i = 0; i < oheight; i++)
{
for (j = 0; j < owidth; j++)
{
/* compute pool range. */
int64_t tstart = ti * dT - padT;
int64_t hstart = i * dH - padH;
int64_t wstart = j * dW - padW;
int64_t tend = fminf(tstart + kT, itime + padT);
int64_t hend = fminf(hstart + kH, iheight + padH);
int64_t wend = fminf(wstart + kW, iwidth + padW);
int64_t pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart);
tstart = fmaxf(tstart, 0);
hstart = fmaxf(hstart, 0);
wstart = fmaxf(wstart, 0);
tend = fmin(tend, itime);
hend = fmin(hend, iheight);
wend = fmin(wend, iwidth);
int divide_factor;
if (count_include_pad)
divide_factor = pool_size;
else
divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);
/* compute local sum: */
real sum = 0.0;
int64_t x, y, z;
for (z = tstart; z < tend; z++)
{
for (y = hstart; y < hend; y++)
{
for (x = wstart; x < wend; x++)
{
sum += *(ip + z * iwidth * iheight + y * iwidth + x);
}
}
}
/* set output to local average */
*op++ += sum / divide_factor;
}
}
}
}
}
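// Editor's note on divide_factor above: at a padded corner with
// kT = kH = kW = 3 and padding 1, only a 2x2x2 block of the 3x3x3 window
// covers real voxels; count_include_pad divides the window sum by 27,
// otherwise by 8.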
void THNN_(VolumetricAveragePooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int padT,
int padW,
int padH,
bool ceil_mode,
bool count_include_pad)
{
int64_t nslices;
int64_t itime;
int64_t iheight;
int64_t iwidth;
int64_t otime;
int64_t oheight;
int64_t owidth;
real *input_data;
real *output_data;
THNN_(VolumetricAveragePooling_shapeCheck)(
state, input, NULL, kT, kW, kH,
dT, dW, dH, padT, padW, padH, ceil_mode);
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
if (input->nDimension == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
nslices = input->size[dimN];
itime = input->size[dimt];
iheight = input->size[dimh];
iwidth = input->size[dimw];
if (ceil_mode)
{
otime = (int64_t)(ceil((float)(itime - kT + 2*padT) / dT)) + 1;
oheight = (int64_t)(ceil((float)(iheight - kH + 2*padH) / dH)) + 1;
owidth = (int64_t)(ceil((float)(iwidth - kW + 2*padW) / dW)) + 1;
}
else
{
otime = (int64_t)(floor((float)(itime - kT + 2*padT) / dT)) + 1;
oheight = (int64_t)(floor((float)(iheight - kH + 2*padH) / dH)) + 1;
owidth = (int64_t)(floor((float)(iwidth - kW + 2*padW) / dW)) + 1;
}
if (padT || padH || padW)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((otime - 1)*dT >= itime + padT)
--otime;
if ((oheight - 1)*dH >= iheight + padH)
--oheight;
if ((owidth - 1)*dW >= iwidth + padW)
--owidth;
}
/* get contiguous input */
input = THTensor_(newContiguous)(input);
if (input->nDimension == 4) /* non-batch mode */
{
/* resize output */
THTensor_(resize4d)(output, nslices, otime, oheight, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
THNN_(VolumetricAveragePooling_updateOutput_frame)(
input_data, output_data, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH,
padT, padW, padH,
count_include_pad
);
}
else /* batch mode */
{
int64_t p;
int64_t nBatch = input->size[0];
int64_t istride = nslices * itime * iwidth * iheight;
int64_t ostride = nslices * otime * owidth * oheight;
/* resize output */
THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
#pragma omp parallel for private(p)
for (p=0; p < nBatch; p++)
{
THNN_(VolumetricAveragePooling_updateOutput_frame)(
input_data + p * istride, output_data + p * ostride, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH,
padT, padW, padH,
count_include_pad
);
}
}
/* cleanup */
THTensor_(free)(input);
}
static void THNN_(VolumetricAveragePooling_updateGradInput_frame)(
real *gradInput_p,
real *gradOutput_p,
int64_t nslices,
int64_t itime,
int64_t iwidth,
int64_t iheight,
int64_t otime,
int64_t owidth,
int64_t oheight,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int padT,
int padW,
int padH,
bool count_include_pad)
{
int64_t k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
int64_t i, j, ti;
/* local pointers */
real *ip = gradInput_p + k * itime * iwidth * iheight;
real *op = gradOutput_p + k * otime * owidth * oheight;
for (i = 0; i < itime*iwidth*iheight; i++)
*(ip + i) = 0;
/* loop over output */
for (ti = 0; ti < otime; ti++)
{
for (i = 0; i < oheight; i++)
{
for (j = 0; j < owidth; j++)
{
int64_t tstart = ti * dT - padT;
int64_t hstart = i * dH - padH;
int64_t wstart = j * dW - padW;
int64_t tend = fminf(tstart + kT, itime + padT);
int64_t hend = fminf(hstart + kH, iheight + padH);
int64_t wend = fminf(wstart + kW, iwidth + padW);
int64_t pool_size = (tend -tstart) * (hend - hstart) * (wend - wstart);
tstart = fmaxf(tstart, 0);
hstart = fmaxf(hstart, 0);
wstart = fmaxf(wstart, 0);
tend = fminf(tend, itime);
hend = fminf(hend, iheight);
wend = fminf(wend, iwidth);
int64_t divide_factor;
if (count_include_pad)
divide_factor = pool_size;
else
divide_factor = (tend - tstart) * (hend - hstart) * (wend - wstart);
/* scatter gradients out to footprint: */
real val = *op++;
int64_t x,y,z;
for (z = tstart; z < tend; z++)
{
for (y = hstart; y < hend; y++)
{
for (x = wstart; x < wend; x++)
{
*(ip + z * iheight * iwidth + y * iwidth + x) += val / divide_factor;
}
}
}
}
}
}
}
}
void THNN_(VolumetricAveragePooling_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int padT,
int padW,
int padH,
bool ceil_mode,
bool count_include_pad)
{
int64_t nslices;
int64_t itime;
int64_t iheight;
int64_t iwidth;
int64_t otime;
int64_t oheight;
int64_t owidth;
real *gradInput_data;
real *gradOutput_data;
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
THNN_(VolumetricAveragePooling_shapeCheck)(
state, input, gradOutput, kT, kW, kH,
dT, dW, dH, padT, padW, padH, ceil_mode);
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
/* resize */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
if (input->nDimension == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
nslices = input->size[dimN];
itime = input->size[dimt];
iheight = input->size[dimh];
iwidth = input->size[dimw];
otime = gradOutput->size[dimt];
oheight = gradOutput->size[dimh];
owidth = gradOutput->size[dimw];
/* get raw pointers */
gradInput_data = THTensor_(data)(gradInput);
gradOutput_data = THTensor_(data)(gradOutput);
/* backprop */
if (input->nDimension == 4) /* non-batch mode*/
{
THNN_(VolumetricAveragePooling_updateGradInput_frame)(
gradInput_data, gradOutput_data, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH,
padT, padW, padH,
count_include_pad
);
}
else /* batch mode */
{
int64_t p;
int64_t nBatch = input->size[0];
int64_t istride = nslices * itime * iwidth * iheight;
int64_t ostride = nslices * otime * owidth * oheight;
#pragma omp parallel for private(p)
for (p = 0; p < nBatch; p++)
{
THNN_(VolumetricAveragePooling_updateGradInput_frame)(
gradInput_data + p * istride, gradOutput_data + p * ostride, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH,
padT, padW, padH,
count_include_pad
);
}
}
/* cleanup */
THTensor_(free)(gradOutput);
}
#endif
|
vbcm.c | /*
** BCM - bipartite connectivity mapping
**
** G.Lohmann, MPI-KYB, Aug 2017
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "viaio/Vlib.h"
#include "viaio/VImage.h"
#include "viaio/mu.h"
#include "viaio/option.h"
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_sort.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_statistics.h>
#include <gsl/gsl_eigen.h>
#include <gsl/gsl_math.h>
extern int gsl_sort_vector_index(gsl_permutation *, const gsl_vector *);
#define SQR(x) ((x) * (x))
#define ABS(x) ((x) > 0 ? (x) : -(x))
#ifdef _OPENMP
#include <omp.h>
#endif /*_OPENMP*/
extern gsl_matrix_float *DataMatrix(VImage *src,int nslices,VImage roi,int);
extern size_t NumVoxels(VImage src);
extern VImage VoxelMap(VImage roi);
typedef struct SpointStruct{
VShort x;
VShort y;
VShort z;
} SPoint;
VDictEntry MetricDict[] = {
{ "pearson", 0 },
{ "spearman", 1 },
{ "MI", 2 },
{ NULL }
};
VImage VCallocImage(int nslices,int nrows,int ncols,VRepnKind repn,VImage ref)
{
VImage image = VCreateImage(nslices,nrows,ncols,repn);
if (image == NULL) VError(" error allocating image");
VFillImage(image,VAllBands,0);
if (ref != NULL) VCopyImageAttrs (ref,image);
return image;
}
void ShowImage(gsl_matrix_float *A,char *filename)
{
int i,j;
int n=A->size1;
int m=A->size2;
VImage tstimage = VCallocImage(1,n,m,VFloatRepn,NULL);
for (i=0; i <n; i++) {
for (j=0; j<m; j++) {
VPixel(tstimage,0,i,j,VFloat) = gsl_matrix_float_get(A,i,j);
}
}
VAttrList out_list = VCreateAttrList();
VAppendAttr(out_list,"image",NULL,VImageRepn,tstimage);
FILE *out_file = fopen(filename,"w");
VWriteFile (out_file, out_list);
}
VImage GetRoi(SPoint addr,int nslices,int nrows,int ncols,int radius)
{
int i,b,r,c;
int rad2 = radius*radius+1;
int b0 = addr.z;
int r0 = addr.y;
int c0 = addr.x;
i=0;
for (b=b0-radius; b<=b0+radius; b++) {
for (r=r0-radius; r<=r0+radius; r++) {
for (c=c0-radius; c<=c0+radius; c++) {
if (SQR(b-b0) + SQR(r-r0) + SQR(c-c0) > rad2) continue;
i++;
}
}
}
VImage roi = VCreateImage(nslices,nrows,ncols,VBitRepn);
VFillImage(roi,VAllBands,0);
for (b=b0-radius; b<=b0+radius; b++) {
if (b < 0 || b >= nslices) VError(" b %d",b);
for (r=r0-radius; r<=r0+radius; r++) {
if (r < 0 || r >= nrows) VError(" r %d",r);
for (c=c0-radius; c<=c0+radius; c++) {
if (c < 0 || c >= ncols) VError(" c %d",c);
if (SQR(b-b0) + SQR(r-r0) + SQR(c-c0) > rad2) continue;
VPixel(roi,b,r,c,VBit) = 1;
}
}
}
return roi;
}
double Pearson(const float *data1,const float *data2,int n)
{
int i;
double corr=0;
for (i=0; i<n; i++) {
const double u = (double)data1[i];
const double v = (double)data2[i];
corr += u*v;
}
corr /= (double)n;
return corr;
}
double Spearman(const float *data1,const float *data2,int n)
{
int i;
double nx = (double)n;
double kx = nx*(nx*nx-1.0);
double sxy=0.0;
for (i=0; i<n; i++) {
const double u = (double)data1[i];
const double v = (double)data2[i];
const double d = (u-v);
sxy += d*d;
}
double rho = 1.0 - 6.0*sxy/kx;
return rho;
}
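/* Editor's note on the formula above: rho = 1 - 6*sum(d^2)/(n*(n*n-1)) is
Spearman's rank correlation and assumes data1/data2 already hold
rank-transformed values (presumably prepared by DataMatrix, which
receives the metric). */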
/* if X,Y are standardized Gaussians, then I = -0.5*log(1-r^2), where r is their correlation */
double MutualInformation(const float *data1,const float *data2,int nt)
{
int i;
double covar,mi,u;
double tiny=1.0e-10;
covar = 0;
for (i=0; i<nt; i++) {
const double u = data1[i];
const double v = data2[i];
covar += u*v;
}
const double nx= nt;
covar /= nx;
mi = 0;
u = 1.0 - covar*covar;
if (u > tiny) mi = -0.5 * log(u);
if (mi < 0) {
VWarning("MI: %f",mi);
mi = 0;
}
return mi;
}
double Correlation(const float *data1,const float *data2,int n,int metric)
{
double corr=0.0;
if (metric == 0) corr = Pearson(data1,data2,n);
if (metric == 1) corr = Spearman(data1,data2,n);
if (metric == 2) corr = MutualInformation(data1,data2,n);
if (gsl_isnan(corr) || gsl_isinf(corr)) corr = 0;
return corr;
}
void VectorNormalize(gsl_vector_float *x)
{
int i;
double nx = (double)x->size;
double sum1=0,sum2=0;
for (i=0; i<x->size; i++) {
sum1 += x->data[i];
sum2 += x->data[i]*x->data[i];
}
double mean = sum1/nx;
double sigma = sqrt((double)((sum2 - nx * mean * mean) / (nx - 1.0)));
for (i=0; i<x->size; i++) {
x->data[i] = (x->data[i] - mean)/sigma;
}
}
/* bipartite eigenvector centrality mapping */
void VBiadjacencyECM(gsl_matrix_float *C,gsl_vector_float *xa,gsl_vector_float *xb)
{
int na = C->size1;
int nb = C->size2;
int i,iter,maxiter=100;
double d=0,sum=0,sum_old=0;
/* ini */
gsl_vector_float *ya = gsl_vector_float_calloc(na);
gsl_vector_float *yb = gsl_vector_float_calloc(nb);
double nx = (double)(na+nb);
for (i=0; i<na; i++) xa->data[i] = 1.0/nx;
for (i=0; i<nb; i++) xb->data[i] = 1.0/nx;
/* iterations */
for (iter=0; iter<maxiter; iter++) {
gsl_blas_sgemv (CblasNoTrans,1.0,C,xb,0.0,ya);
gsl_blas_sgemv (CblasTrans,1.0,C,xa,0.0,yb);
sum = 0;
for (i=0; i<na; i++) sum += ya->data[i]*ya->data[i];
for (i=0; i<nb; i++) sum += yb->data[i]*yb->data[i];
sum = sqrt(sum);
for (i=0; i<na; i++) xa->data[i] = ya->data[i]/sum;
for (i=0; i<nb; i++) xb->data[i] = yb->data[i]/sum;
d = fabs(sum-sum_old);
if (iter > 0) fprintf(stderr," %5d %f\n",(int)iter,d);
if (d < 1.0e-8 && iter > 3) break;
if (iter > 5 && sum > sum_old) break;
sum_old = sum;
}
gsl_vector_float_free(ya);
gsl_vector_float_free(yb);
}
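/* Editor's note on the iteration above: it is a power iteration on the
biadjacency matrix C (ya = C*xb, yb = C^T*xa, then a joint 2-norm
rescale), so (xa, xb) converges to the leading left/right singular
vectors of C, i.e. the eigenvector centralities of the two sides of the
bipartite graph. */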
/* bipartite degree centrality mapping */
void VBiadjacencyDegreeMap(gsl_matrix_float *C,gsl_vector_float *xa,gsl_vector_float *xb)
{
size_t na = C->size1;
size_t nb = C->size2;
size_t i;
gsl_vector_float *ya = gsl_vector_float_calloc(na);
gsl_vector_float *yb = gsl_vector_float_calloc(nb);
for (i=0; i<na; i++) xa->data[i] = 1.0/(double)na;
for (i=0; i<nb; i++) xb->data[i] = 1.0/(double)nb;
gsl_blas_sgemv (CblasNoTrans,1.0,C,xb,0.0,ya);
gsl_blas_sgemv (CblasTrans,1.0,C,xa,0.0,yb);
gsl_vector_float_memcpy(xa,ya);
gsl_vector_float_memcpy(xb,yb);
gsl_vector_float_free(ya);
gsl_vector_float_free(yb);
}
/* project onto ROI */
void VNetworkProjection(gsl_matrix_float *C,gsl_vector_float *x)
{
size_t na = C->size1;
size_t nb = C->size2;
size_t i,n1=0,n2=0;
if (x->size == na) {
n1 = na;
n2 = nb;
}
else {
n1 = nb;
n2 = na;
}
float nx = (float)n2;
fprintf(stderr," network projection\n");
/* project bipartite graph onto ROI */
gsl_matrix_float *A = gsl_matrix_float_calloc(n1,n1);
if (!A) VError(" err allocating projection network");
int progress=0;
#pragma omp parallel for schedule(guided) firstprivate(C,A)
for (i=0; i<n1; i++) {
size_t j=0,k=0;
if (i%10 == 0) fprintf(stderr," %d0 of %lu\r",(int)(++progress),n1);
for (j=0; j<n1; j++) {
float w=0;
for (k=0; k<n2; k++) {
/* index C as-is when projecting onto ROI1; transposed for ROI2 */
float u = (n1 == na) ? gsl_matrix_float_get(C,i,k) : gsl_matrix_float_get(C,k,i);
float v = (n1 == na) ? gsl_matrix_float_get(C,j,k) : gsl_matrix_float_get(C,k,j);
w += u*v;
}
#pragma omp critical
{
gsl_matrix_float_set(A,i,j,w/nx);
}
}
}
fprintf(stderr,"\n");
/* Hubs of projected network */
float w=0;
size_t j=0;
for (i=0; i<n1; i++) {
w=0;
for (j=0; j<n1; j++) {
w += gsl_matrix_float_get(A,i,j);
}
x->data[i] = w/nx;
}
gsl_matrix_float_free(A);
}
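/* Editor's note on the projection above: the loop forms the one-mode
projection A = C*C^T / n2 (entry (i,j): mean co-connectivity of ROI
voxels i and j through the other ROI), then stores
x[i] = (1/n2) * sum_j A[i][j], a degree/hub score of the projected
network. */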
/* threshold matrix: zero out entries below the threshold */
void VThresholdMatrix(gsl_matrix_float *C,float threshold)
{
int i,j;
size_t nneg=0,npos=0;
float u=0,tiny=1.0e-6;
double sum1=0,sum2=0;
double nx = (double)(C->size1 * C->size2);
for (i=0; i<C->size1; i++) {
for (j=0; j<C->size2; j++) {
u = gsl_matrix_float_get(C,i,j);
sum1 += u;
sum2 += u*u;
if (u < threshold+tiny) nneg++;
else npos++;
if (u < threshold+tiny) gsl_matrix_float_set(C,i,j,0.0);
}
}
if (npos < 1) VWarning(" Subthreshold correlations only");
double mean = sum1/nx;
double sigma = sqrt((double)((sum2 - nx * mean * mean) / (nx - 1.0)));
fprintf(stderr," matrix mean,std: %f %f, npos: %lu, nneg: %lu\n",mean,sigma,npos,nneg);
}
int main (int argc,char *argv[])
{
static SPoint addr1;
static SPoint addr2;
static VString roi1_filename = "";
static VString roi2_filename = "";
static VShort type=1;
static VFloat threshold = 0.0;
static VShort radius=5;
static VShort metric = 0;
static VBoolean plotboth=FALSE;
static VBoolean normalize=TRUE;
static VOptionDescRec options[] = {
{"roi1",VStringRepn,1,(VPointer) &roi1_filename,VOptionalOpt,NULL,"ROI 1"},
{"roi2",VStringRepn,1,(VPointer) &roi2_filename,VOptionalOpt,NULL,"ROI 2"},
{"seed1",VShortRepn,3,(VPointer) &addr1,VOptionalOpt,NULL,"Voxel address of seed point (x,y,z)"},
{"seed2",VShortRepn,3,(VPointer) &addr2,VOptionalOpt,NULL,"Voxel address of seed point (x,y,z)"},
{"type", VShortRepn,1,(VPointer) &type,VOptionalOpt,NULL,"Type, 0:ECM, 1: DCM, 2: project onto ROI1, 3: project onto ROI2"},
{"threshold", VFloatRepn,1,(VPointer) &threshold,VOptionalOpt,NULL,"Matrix threshold"},
{"metric",VShortRepn,1,(VPointer) &metric,VOptionalOpt,MetricDict,"Correlation metric"},
{"radius", VShortRepn,1,(VPointer) &radius,VOptionalOpt,NULL,"Radius around seed voxel"},
{"both", VBooleanRepn,1,(VPointer) &plotboth,VOptionalOpt,NULL,"Whether to plot results of both ROIs"},
{"normalize", VBooleanRepn,1,(VPointer) &normalize,VOptionalOpt,NULL,"Whether to normalize results"},
};
FILE *out_file=NULL;
VString in_file=NULL;
VImage roi1=NULL,roi2=NULL;
int i;
char *prg = GetLipsiaName("vbcm");
/* Parse command line arguments and identify files: */
VParseFilterCmdX (VNumber (options), options, argc, argv,&in_file,&out_file);
fprintf(stderr," type= %d\n",type);
/* read functional data */
VAttrList list = VReadAttrList(in_file,0L,TRUE,FALSE);
if (list == NULL) VError(" error reading input file %s",in_file);
/* get pointers to image data */
int nrows=0,ncols=0,nt=0;
int nslices = VAttrListNumImages(list);
VImage *src = VAttrListGetImages(list,nslices);
VImageDimensions(src,nslices,&nt,&nrows,&ncols);
fprintf(stderr," image dims: %d %d %d, nt: %d\n",nslices,nrows,ncols,nt);
/* omp-stuff */
#ifdef _OPENMP
int num_procs=omp_get_num_procs();
printf("using %d cores\n",(int)num_procs);
omp_set_num_threads(num_procs);
#endif /* _OPENMP */
/* ROI 1 */
if (strlen(roi1_filename) > 0) {
VAttrList list1 = VReadAttrList(roi1_filename,0L,TRUE,FALSE);
if (list1 == NULL) VError(" error reading roi file %s",roi1_filename);
roi1 = VReadImage(list1);
if (roi1 == NULL) VError(" err reading %s",roi1_filename);
}
/* ROI 2 */
if (strlen(roi2_filename) > 0) {
VAttrList list2 = VReadAttrList(roi2_filename,0L,TRUE,FALSE);
if (list2 == NULL) VError(" error reading roi file %s",roi2_filename);
roi2 = VReadImage(list2);
if (roi2 == NULL) VError(" err reading %s",roi2_filename);
}
/* ROIs from seed voxels */
if (roi1 == NULL) roi1 = GetRoi(addr1,nslices,nrows,ncols,radius);
if (roi2 == NULL) roi2 = GetRoi(addr2,nslices,nrows,ncols,radius);
/* voxel maps */
VImage map1 = VoxelMap(roi1);
VImage map2 = VoxelMap(roi2);
size_t nvox1 = NumVoxels(roi1);
size_t nvox2 = NumVoxels(roi2);
fprintf(stderr," nvox1: %lu, nvox2: %lu\n",nvox1,nvox2);
/* read data matrices */
gsl_matrix_float *X1 = DataMatrix(src,nslices,roi1,(int)metric);
gsl_matrix_float *X2 = DataMatrix(src,nslices,roi2,(int)metric);
/* correlation matrix */
fprintf(stderr," CorrMatrix...\n");
int rad2 = 3*3; /* exclusion radius */
gsl_matrix_float *A = gsl_matrix_float_calloc(nvox1,nvox2);
if (A == NULL) VError(" err allocating corr matrix, %lu x %lu",nvox1,nvox2);
int progress=0;
#pragma omp parallel for schedule(guided) firstprivate(A)
for (i=0; i<nvox1; i++) {
if (i%100 == 0) fprintf(stderr," %d00\r",(int)(++progress));
int bi = VPixel(map1,0,0,i,VShort);
int ri = VPixel(map1,0,1,i,VShort);
int ci = VPixel(map1,0,2,i,VShort);
int j=0;
float u=0;
float *tmp = (float *) VCalloc(nvox2,sizeof(float));
for (j=0; j<nvox2; j++) tmp[j] = -1.0;
const float *data1 = gsl_matrix_float_const_ptr(X1,i,0);
for (j=0; j<nvox2; j++) {
int bj = VPixel(map2,0,0,j,VShort);
int rj = VPixel(map2,0,1,j,VShort);
int cj = VPixel(map2,0,2,j,VShort);
int di = SQR(bi-bj) + SQR(ri-rj) + SQR(ci-cj);
if (di < rad2) continue;
const float *data2 = gsl_matrix_float_const_ptr(X2,j,0);
tmp[j] = Correlation(data1,data2,nt,(int)metric);
}
#pragma omp critical
{
for (j=0; j<nvox2; j++) {
u = tmp[j];
gsl_matrix_float_set(A,i,j,u);
}
}
VFree(tmp);
}
fprintf(stderr,"\n");
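/* Editor's note on the loop above: voxel pairs closer than the exclusion
radius (rad2 = 9, i.e. less than 3 voxels apart) keep the sentinel value
-1.0, which the threshold step below zeroes out, so short-range (likely
artifactual) correlations never enter the bipartite graph. */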
/*
ShowImage(A,"test.v");
exit(0);
*/
/* Biadjacency ECM */
fprintf(stderr," Biadjacency Connectivity...\n");
gsl_vector_float *xa = gsl_vector_float_calloc(nvox1);
gsl_vector_float *xb = gsl_vector_float_calloc(nvox2);
fprintf(stderr," type= %d\n",type);
VThresholdMatrix(A,threshold);
switch(type) {
case 0:
VBiadjacencyECM(A,xa,xb);
break;
case 1:
VBiadjacencyDegreeMap(A,xa,xb);
break;
case 2:
VNetworkProjection(A,xa);
break;
case 3:
VNetworkProjection(A,xb);
break;
default:
VError(" unknown type");
}
/* normalize */
if (normalize) {
VectorNormalize(xa);
VectorNormalize(xb);
}
/* output */
VImage dest = VCallocImage(nslices,nrows,ncols,VFloatRepn,src[0]);
int b,r,c;
for (i=0; i<nvox1; i++) {
b = VPixel(map1,0,0,i,VShort);
r = VPixel(map1,0,1,i,VShort);
c = VPixel(map1,0,2,i,VShort);
VPixel(dest,b,r,c,VFloat) = 100.0*xa->data[i];
}
if (plotboth) {
for (i=0; i<nvox2; i++) {
b = VPixel(map2,0,0,i,VShort);
r = VPixel(map2,0,1,i,VShort);
c = VPixel(map2,0,2,i,VShort);
VPixel(dest,b,r,c,VFloat) = 100.0*xb->data[i];
}
}
/* write output to disk */
VAttrList out_list = VCreateAttrList();
VAttrList geolist = VGetGeoInfo(list);
if (geolist != NULL) {
double *D = VGetGeoDim(geolist,NULL);
D[0] = 3; /* 3D */
D[4] = 1;
VSetGeoDim(geolist,D);
VSetGeoInfo(geolist,out_list);
}
VHistory(VNumber(options),options,prg,&list,&out_list);
VAppendAttr(out_list,"image",NULL,VImageRepn,dest);
VWriteFile (out_file, out_list);
fclose(out_file);
fprintf (stderr, "%s: done. \n", argv[0]);
exit(0);
}
|
courtemanche_ramirez_nattel_1998.c | #include "courtemanche_ramirez_nattel_1998.h"
#include <stdlib.h>
real max_step;
real min_step;
real abstol;
real reltol;
bool adpt;
real *ode_dt, *ode_previous_dt, *ode_time_new;
GET_CELL_MODEL_DATA(init_cell_model_data) {
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
log_to_stdout_and_file("Using courtemanche_ramirez_nattel_1998 CPU model\n");
uint32_t num_cells = solver->original_num_cells;
solver->sv = (real*)malloc(NEQ*num_cells*sizeof(real));
max_step = solver->max_dt;
min_step = solver->min_dt;
abstol = solver->abs_tol;
reltol = solver->rel_tol;
adpt = solver->adaptive;
if(adpt) {
ode_dt = (real*)malloc(num_cells*sizeof(real));
OMP(parallel for)
for(int i = 0; i < num_cells; i++) {
ode_dt[i] = solver->min_dt;
}
ode_previous_dt = (real*)calloc(num_cells, sizeof(real));
ode_time_new = (real*)calloc(num_cells, sizeof(real));
log_to_stdout_and_file("Using Adaptive Euler model to solve the ODEs\n");
} else {
log_to_stdout_and_file("Using Euler model to solve the ODEs\n");
}
OMP(parallel for)
for(uint32_t i = 0; i < num_cells; i++) {
real *sv = &solver->sv[i * NEQ];
sv[0] = -8.118000e+01f; //V millivolt
sv[1] = 2.908000e-03f; //m dimensionless
sv[2] = 9.649000e-01f; //h dimensionless
sv[3] = 9.775000e-01f; //j dimensionless
sv[4] = 3.043000e-02f; //oa dimensionless
sv[5] = 9.992000e-01f; //oi dimensionless
sv[6] = 4.966000e-03f; //ua dimensionless
sv[7] = 9.986000e-01f; //ui dimensionless
sv[8] = 3.296000e-05f; //xr dimensionless
sv[9] = 1.869000e-02f; //xs dimensionless
sv[10] = 1.367000e-04f; //d dimensionless
sv[11] = 9.996000e-01f; //f dimensionless
sv[12] = 7.755000e-01f; //f_Ca dimensionless
sv[13] = 0.0; //u dimensionless
sv[14] = 1.000000e+00f; //v dimensionless
sv[15] = 9.992000e-01f; //w dimensionless
sv[16] = 1.117000e+01f; //Na_i millimolar
sv[17] = 1.390000e+02f; //K_i millimolar
sv[18] = 1.013000e-04f; //Ca_i millimolar
sv[19] = 1.488000e+00f; //Ca_up millimolar
sv[20] = 1.488000e+00f; //Ca_rel millimolar
}
}
SOLVE_MODEL_ODES(solve_model_odes_cpu) {
uint32_t sv_id;
size_t num_cells_to_solve = ode_solver->num_cells_to_solve;
uint32_t * cells_to_solve = ode_solver->cells_to_solve;
real *sv = ode_solver->sv;
real dt = ode_solver->min_dt;
uint32_t num_steps = ode_solver->num_steps;
#pragma omp parallel for private(sv_id)
for (uint32_t i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
if(adpt) {
solve_forward_euler_cpu_adpt(sv + (sv_id * NEQ), stim_currents[i], current_t + dt, sv_id);
}
else {
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
}
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = dt*rDY[i] + rY[i];
}
void solve_forward_euler_cpu_adpt(real *sv, real stim_curr, real final_time, int sv_id) {
real rDY[NEQ];
real _tolerances_[NEQ];
real _aux_tol = 0.0;
//initializes the variables
real dt = ode_dt[sv_id];
real time_new = ode_time_new[sv_id];
real previous_dt = ode_previous_dt[sv_id];
real edos_old_aux_[NEQ];
real edos_new_euler_[NEQ];
real *_k1__ = (real*) malloc(sizeof(real)*NEQ);
real *_k2__ = (real*) malloc(sizeof(real)*NEQ);
real *_k_aux__;
const real _beta_safety_ = 0.8;
const real __tiny_ = pow(abstol, 2.0f);
if(time_new + dt > final_time) {
dt = final_time - time_new;
}
RHS_cpu(sv, rDY, stim_curr, dt);
time_new += dt;
for(int i = 0; i < NEQ; i++){
_k1__[i] = rDY[i];
}
int count = 0;
int count_limit = (final_time - time_new)/min_step;
int aux_count_limit = count_limit+2000000;
if(aux_count_limit > 0) {
count_limit = aux_count_limit;
}
while(1) {
for(int i = 0; i < NEQ; i++) {
//stores the old variables in a vector
edos_old_aux_[i] = sv[i];
//computes euler method
edos_new_euler_[i] = _k1__[i] * dt + edos_old_aux_[i];
//steps ahead to compute the rk2 method
sv[i] = edos_new_euler_[i];
}
time_new += dt;
RHS_cpu(sv, rDY, stim_curr, dt);
time_new -= dt;//step back
double greatestError = 0.0, auxError = 0.0;
for(int i = 0; i < NEQ; i++) {
// stores the new evaluation
_k2__[i] = rDY[i];
_aux_tol = fabs(edos_new_euler_[i]) * reltol;
_tolerances_[i] = (abstol > _aux_tol) ? abstol : _aux_tol;
// finds the greatest error between the steps
auxError = fabs(((dt / 2.0) * (_k1__[i] - _k2__[i])) / _tolerances_[i]);
greatestError = (auxError > greatestError) ? auxError : greatestError;
}
// guard against division by zero in the step-size update below
greatestError += __tiny_;
previous_dt = dt;
///adapt the time step
dt = _beta_safety_ * dt * sqrt(1.0f/greatestError);
if (time_new + dt > final_time) {
dt = final_time - time_new;
}
//it doesn't accept the solution
if ( count < count_limit && (greatestError >= 1.0f)) {
//restore the old values to do it again
for(int i = 0; i < NEQ; i++) {
sv[i] = edos_old_aux_[i];
}
count++;
//throw the results away and compute again
} else{//it accepts the solutions
count = 0;
if (dt < min_step) {
dt = min_step;
}
else if (dt > max_step && max_step != 0) {
dt = max_step;
}
if (time_new + dt > final_time) {
dt = final_time - time_new;
}
_k_aux__ = _k2__;
_k2__ = _k1__;
_k1__ = _k_aux__;
//it steps the method ahead, with euler solution
for(int i = 0; i < NEQ; i++){
sv[i] = edos_new_euler_[i];
}
if(time_new + previous_dt >= final_time) {
if((fabs(final_time - time_new) < 1.0e-5)) {
break;
} else if(time_new < final_time) {
dt = previous_dt = final_time - time_new;
time_new += previous_dt;
break;
} else {
dt = previous_dt = min_step;
time_new += (final_time - time_new);
printf("Error: %lf\n", final_time - time_new);
break;
}
} else {
time_new += previous_dt;
}
}
}
ode_dt[sv_id] = dt;
ode_time_new[sv_id] = time_new;
ode_previous_dt[sv_id] = previous_dt;
free(_k1__);
free(_k2__);
}
void RHS_cpu(const real *sv, real *rDY, real stim_current, real dt) {
//State variables
const real V_old_ = sv[0];
const real m_old_ = sv[1];
const real h_old_ = sv[2];
const real j_old_ = sv[3];
const real oa_old_ = sv[4];
const real oi_old_ = sv[5];
const real ua_old_ = sv[6];
const real ui_old_ = sv[7];
const real xr_old_ = sv[8];
const real xs_old_ = sv[9];
const real d_old_ = sv[10];
const real f_old_ = sv[11];
const real f_Ca_old_ = sv[12];
const real u_old_ = sv[13];
const real v_old_ = sv[14];
const real w_old_ = sv[15];
const real Na_i_old_ = sv[16];
const real K_i_old_ = sv[17];
const real Ca_i_old_ = sv[18];
const real Ca_up_old_ = sv[19];
const real Ca_rel_old_ = sv[20];
#include "courtemanche_ramirez_nattel_1998_common.inc.c"
}
|
z_solve.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB SP code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
//---------------------------------------------------------------------
// this function performs the solution of the approximate factorization
// step in the z-direction for all five matrix components
// simultaneously. The Thomas algorithm is employed to solve the
// systems for the z-lines. Boundary conditions are non-periodic
//---------------------------------------------------------------------
void z_solve()
{
int i, j, k, k1, k2, m;
double ru1, fac1, fac2;
//---------------------------------------------------------------------
// Prepare for z-solve, array redistribution
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(i,j,k,k1,k2,m, \
ru1,fac1,fac2)
for (j = 1; j <= ny2; j++) {
lhsinitj(nz2+1, nx2);
//---------------------------------------------------------------------
// Computes the left hand side for the three z-factors
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
for (i = 1; i <= nx2; i++) {
for (k = 0; k <= nz2+1; k++) {
ru1 = c3c4*rho_i[k][j][i];
cv[k] = ws[k][j][i];
rhos[k] = max(max(dz4+con43*ru1, dz5+c1c5*ru1), max(dzmax+ru1, dz1));
}
for (k = 1; k <= nz2; k++) {
lhs[k][i][0] = 0.0;
lhs[k][i][1] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1];
lhs[k][i][2] = 1.0 + c2dttz1 * rhos[k];
lhs[k][i][3] = dttz2 * cv[k+1] - dttz1 * rhos[k+1];
lhs[k][i][4] = 0.0;
}
}
//---------------------------------------------------------------------
// add fourth order dissipation
//---------------------------------------------------------------------
for (i = 1; i <= nx2; i++) {
k = 1;
lhs[k][i][2] = lhs[k][i][2] + comz5;
lhs[k][i][3] = lhs[k][i][3] - comz4;
lhs[k][i][4] = lhs[k][i][4] + comz1;
k = 2;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz6;
lhs[k][i][3] = lhs[k][i][3] - comz4;
lhs[k][i][4] = lhs[k][i][4] + comz1;
}
for (k = 3; k <= nz2-2; k++) {
for (i = 1; i <= nx2; i++) {
lhs[k][i][0] = lhs[k][i][0] + comz1;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz6;
lhs[k][i][3] = lhs[k][i][3] - comz4;
lhs[k][i][4] = lhs[k][i][4] + comz1;
}
}
for (i = 1; i <= nx2; i++) {
k = nz2-1;
lhs[k][i][0] = lhs[k][i][0] + comz1;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz6;
lhs[k][i][3] = lhs[k][i][3] - comz4;
k = nz2;
lhs[k][i][0] = lhs[k][i][0] + comz1;
lhs[k][i][1] = lhs[k][i][1] - comz4;
lhs[k][i][2] = lhs[k][i][2] + comz5;
}
//---------------------------------------------------------------------
// subsequently, fill the other factors (u+c), (u-c)
//---------------------------------------------------------------------
for (k = 1; k <= nz2; k++) {
for (i = 1; i <= nx2; i++) {
lhsp[k][i][0] = lhs[k][i][0];
lhsp[k][i][1] = lhs[k][i][1] - dttz2 * speed[k-1][j][i];
lhsp[k][i][2] = lhs[k][i][2];
lhsp[k][i][3] = lhs[k][i][3] + dttz2 * speed[k+1][j][i];
lhsp[k][i][4] = lhs[k][i][4];
lhsm[k][i][0] = lhs[k][i][0];
lhsm[k][i][1] = lhs[k][i][1] + dttz2 * speed[k-1][j][i];
lhsm[k][i][2] = lhs[k][i][2];
lhsm[k][i][3] = lhs[k][i][3] - dttz2 * speed[k+1][j][i];
lhsm[k][i][4] = lhs[k][i][4];
}
}
//---------------------------------------------------------------------
// FORWARD ELIMINATION
//---------------------------------------------------------------------
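// lhs[k][i][0..4] hold the five diagonals of a scalar pentadiagonal system
// along each z-line, which the sweep below reduces to upper triangular form.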
for (k = 0; k <= grid_points[2]-3; k++) {
k1 = k + 1;
k2 = k + 2;
for (i = 1; i <= nx2; i++) {
fac1 = 1.0/lhs[k][i][2];
lhs[k][i][3] = fac1*lhs[k][i][3];
lhs[k][i][4] = fac1*lhs[k][i][4];
for (m = 0; m < 3; m++) {
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
}
lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1]*lhs[k][i][3];
lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1]*lhs[k][i][4];
for (m = 0; m < 3; m++) {
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1]*rhs[k][j][i][m];
}
lhs[k2][i][1] = lhs[k2][i][1] - lhs[k2][i][0]*lhs[k][i][3];
lhs[k2][i][2] = lhs[k2][i][2] - lhs[k2][i][0]*lhs[k][i][4];
for (m = 0; m < 3; m++) {
rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhs[k2][i][0]*rhs[k][j][i][m];
}
}
}
//---------------------------------------------------------------------
// The last two rows in this grid block are a bit different,
// since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
k = grid_points[2]-2;
k1 = grid_points[2]-1;
for (i = 1; i <= nx2; i++) {
fac1 = 1.0/lhs[k][i][2];
lhs[k][i][3] = fac1*lhs[k][i][3];
lhs[k][i][4] = fac1*lhs[k][i][4];
for (m = 0; m < 3; m++) {
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
}
lhs[k1][i][2] = lhs[k1][i][2] - lhs[k1][i][1]*lhs[k][i][3];
lhs[k1][i][3] = lhs[k1][i][3] - lhs[k1][i][1]*lhs[k][i][4];
for (m = 0; m < 3; m++) {
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhs[k1][i][1]*rhs[k][j][i][m];
}
//---------------------------------------------------------------------
// scale the last row immediately
//---------------------------------------------------------------------
fac2 = 1.0/lhs[k1][i][2];
for (m = 0; m < 3; m++) {
rhs[k1][j][i][m] = fac2*rhs[k1][j][i][m];
}
}
//---------------------------------------------------------------------
// Do the u+c and the u-c factors
//---------------------------------------------------------------------
for (k = 0; k <= grid_points[2]-3; k++) {
k1 = k + 1;
k2 = k + 2;
for (i = 1; i <= nx2; i++) {
m = 3;
fac1 = 1.0/lhsp[k][i][2];
lhsp[k][i][3] = fac1*lhsp[k][i][3];
lhsp[k][i][4] = fac1*lhsp[k][i][4];
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1]*lhsp[k][i][3];
lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1]*lhsp[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1]*rhs[k][j][i][m];
lhsp[k2][i][1] = lhsp[k2][i][1] - lhsp[k2][i][0]*lhsp[k][i][3];
lhsp[k2][i][2] = lhsp[k2][i][2] - lhsp[k2][i][0]*lhsp[k][i][4];
rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsp[k2][i][0]*rhs[k][j][i][m];
m = 4;
fac1 = 1.0/lhsm[k][i][2];
lhsm[k][i][3] = fac1*lhsm[k][i][3];
lhsm[k][i][4] = fac1*lhsm[k][i][4];
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1]*lhsm[k][i][3];
lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1]*lhsm[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1]*rhs[k][j][i][m];
lhsm[k2][i][1] = lhsm[k2][i][1] - lhsm[k2][i][0]*lhsm[k][i][3];
lhsm[k2][i][2] = lhsm[k2][i][2] - lhsm[k2][i][0]*lhsm[k][i][4];
rhs[k2][j][i][m] = rhs[k2][j][i][m] - lhsm[k2][i][0]*rhs[k][j][i][m];
}
}
//---------------------------------------------------------------------
// And again the last two rows separately
//---------------------------------------------------------------------
k = grid_points[2]-2;
k1 = grid_points[2]-1;
for (i = 1; i <= nx2; i++) {
m = 3;
fac1 = 1.0/lhsp[k][i][2];
lhsp[k][i][3] = fac1*lhsp[k][i][3];
lhsp[k][i][4] = fac1*lhsp[k][i][4];
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
lhsp[k1][i][2] = lhsp[k1][i][2] - lhsp[k1][i][1]*lhsp[k][i][3];
lhsp[k1][i][3] = lhsp[k1][i][3] - lhsp[k1][i][1]*lhsp[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsp[k1][i][1]*rhs[k][j][i][m];
m = 4;
fac1 = 1.0/lhsm[k][i][2];
lhsm[k][i][3] = fac1*lhsm[k][i][3];
lhsm[k][i][4] = fac1*lhsm[k][i][4];
rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
lhsm[k1][i][2] = lhsm[k1][i][2] - lhsm[k1][i][1]*lhsm[k][i][3];
lhsm[k1][i][3] = lhsm[k1][i][3] - lhsm[k1][i][1]*lhsm[k][i][4];
rhs[k1][j][i][m] = rhs[k1][j][i][m] - lhsm[k1][i][1]*rhs[k][j][i][m];
//---------------------------------------------------------------------
// Scale the last row immediately (some of this is overkill
// if this is the last cell)
//---------------------------------------------------------------------
rhs[k1][j][i][3] = rhs[k1][j][i][3]/lhsp[k1][i][2];
rhs[k1][j][i][4] = rhs[k1][j][i][4]/lhsm[k1][i][2];
}
//---------------------------------------------------------------------
// BACKSUBSTITUTION
//---------------------------------------------------------------------
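// Components 0..2 use the u-eigenvalue factor (lhs); components 3 and 4 use
// the u+c (lhsp) and u-c (lhsm) factors, respectively.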
k = grid_points[2]-2;
k1 = grid_points[2]-1;
for (i = 1; i <= nx2; i++) {
for (m = 0; m < 3; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][i][3]*rhs[k1][j][i][m];
}
rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[k][i][3]*rhs[k1][j][i][3];
rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[k][i][3]*rhs[k1][j][i][4];
}
//---------------------------------------------------------------------
// Whether or not this is the last processor, we always have
// to complete the back-substitution
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// The first three factors
//---------------------------------------------------------------------
for (k = grid_points[2]-3; k >= 0; k--) {
k1 = k + 1;
k2 = k + 2;
for (i = 1; i <= nx2; i++) {
for (m = 0; m < 3; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] -
lhs[k][i][3]*rhs[k1][j][i][m] -
lhs[k][i][4]*rhs[k2][j][i][m];
}
//-------------------------------------------------------------------
// And the remaining two
//-------------------------------------------------------------------
rhs[k][j][i][3] = rhs[k][j][i][3] -
lhsp[k][i][3]*rhs[k1][j][i][3] -
lhsp[k][i][4]*rhs[k2][j][i][3];
rhs[k][j][i][4] = rhs[k][j][i][4] -
lhsm[k][i][3]*rhs[k1][j][i][4] -
lhsm[k][i][4]*rhs[k2][j][i][4];
}
}
}
tzetar();
}
|
dscatter.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*! @file
* \brief Scatter the computed blocks into LU destination.
*
* <pre>
* -- Distributed SuperLU routine (version 6.1.1) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* October 1, 2014
*
* Modified:
* September 18, 2017, enable SIMD vectorized scatter operation.
*
* </pre>
*/
#include <math.h>
#include "superlu_ddefs.h"
static void
dscatter_l_1 (int ib,
int ljb,
int nsupc,
int_t iukp,
int_t* xsup,
int klst,
int nbrow,
int_t lptr,
int temp_nbrow,
int * usub,
int * lsub,
double *tempv,
int * indirect_thread,
int_t ** Lrowind_bc_ptr, double **Lnzval_bc_ptr,
gridinfo_t * grid)
{
// TAU_STATIC_TIMER_START("SCATTER_LB");
// printf("hello\n");
int_t rel, i, segsize, jj;
double *nzval;
int_t *index = Lrowind_bc_ptr[ljb];
int_t ldv = index[1]; /* LDA of the dest lusup. */
int_t lptrj = BC_HEADER;
int_t luptrj = 0;
int_t ijb = index[lptrj];
while (ijb != ib)
{
/* Search for dest block --
blocks are not ordered! */
luptrj += index[lptrj + 1];
lptrj += LB_DESCRIPTOR + index[lptrj + 1];
ijb = index[lptrj];
}
/*
* Build indirect table. This is needed because the
* indices are not sorted for the L blocks.
*/
int_t fnz = FstBlockC (ib);
lptrj += LB_DESCRIPTOR;
for (i = 0; i < index[lptrj - 1]; ++i)
{
rel = index[lptrj + i] - fnz;
indirect_thread[rel] = i;
}
nzval = Lnzval_bc_ptr[ljb] + luptrj;
// tempv =bigV + (cum_nrow + cum_ncol*nbrow);
for (jj = 0; jj < nsupc; ++jj)
{
segsize = klst - usub[iukp + jj];
// printf("segsize %d \n",segsize);
if (segsize) {
/*#pragma _CRI cache_bypass nzval,tempv */
for (i = 0; i < temp_nbrow; ++i) {
rel = lsub[lptr + i] - fnz;
nzval[indirect_thread[rel]] -= tempv[i];
// printf("i (src) %d, perm (dest) %d \n",i,indirect_thread[rel]);
#ifdef PI_DEBUG
double zz = 0.0;
// if(!(*(long*)&zz == *(long*)&tempv[i]) )
printf ("(%d %d, %0.3e, %0.3e, %3e ) ", ljb,
nzval - Lnzval_bc_ptr[ljb] + indirect_thread[rel],
nzval[indirect_thread[rel]] + tempv[i],
nzval[indirect_thread[rel]],tempv[i]);
//print triplets (location, old value, new value) when the values are nonzero
#endif
}
// printf("\n");
tempv += nbrow;
#ifdef PI_DEBUG
// printf("\n");
#endif
}
nzval += ldv;
// printf("%d\n",nzval );
}
// TAU_STATIC_TIMER_STOP("SCATTER_LB");
} /* dscatter_l_1 */
static void
dscatter_l (
int ib, /* row block number of source block L(i,k) */
int ljb, /* local column block number of dest. block L(i,j) */
int nsupc, /* number of columns in destination supernode */
int_t iukp, /* point to destination supernode's index[] */
int_t* xsup,
int klst,
int nbrow, /* LDA of the block in tempv[] */
int_t lptr, /* Input, point to index[] location of block L(i,k) */
int temp_nbrow, /* number of rows of source block L(i,k) */
int_t* usub,
int_t* lsub,
double *tempv,
int* indirect_thread,int* indirect2,
int_t ** Lrowind_bc_ptr, double **Lnzval_bc_ptr,
gridinfo_t * grid)
{
int_t rel, i, segsize, jj;
double *nzval;
int_t *index = Lrowind_bc_ptr[ljb];
int_t ldv = index[1]; /* LDA of the destination lusup. */
int_t lptrj = BC_HEADER;
int_t luptrj = 0;
int_t ijb = index[lptrj];
while (ijb != ib) /* Search for destination block L(i,j) */
{
luptrj += index[lptrj + 1];
lptrj += LB_DESCRIPTOR + index[lptrj + 1];
ijb = index[lptrj];
}
/*
* Build indirect table. This is needed because the indices are not sorted
* in the L blocks.
*/
int_t fnz = FstBlockC (ib);
int_t dest_nbrow;
lptrj += LB_DESCRIPTOR;
dest_nbrow=index[lptrj - 1];
#if (_OPENMP>=201307)
#pragma omp simd
#endif
for (i = 0; i < dest_nbrow; ++i) {
rel = index[lptrj + i] - fnz;
indirect_thread[rel] = i;
}
#if (_OPENMP>=201307)
#pragma omp simd
#endif
/* can be precalculated? */
for (i = 0; i < temp_nbrow; ++i) { /* Source index is a subset of dest. */
rel = lsub[lptr + i] - fnz;
indirect2[i] =indirect_thread[rel];
}
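/* With the gather indices precomputed in indirect2[], the update loop below
reduces to an indexed subtraction that the compiler can vectorize. */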
nzval = Lnzval_bc_ptr[ljb] + luptrj; /* Destination block L(i,j) */
#ifdef __INTEL_COMPILER
#pragma ivdep
#endif
for (jj = 0; jj < nsupc; ++jj) {
segsize = klst - usub[iukp + jj];
if (segsize) {
#if (_OPENMP>=201307)
#pragma omp simd
#endif
for (i = 0; i < temp_nbrow; ++i) {
nzval[indirect2[i]] -= tempv[i];
}
tempv += nbrow;
}
nzval += ldv;
}
} /* dscatter_l */
static void
dscatter_u (int ib,
int jb,
int nsupc,
int_t iukp,
int_t * xsup,
int klst,
int nbrow, /* LDA of the block in tempv[] */
int_t lptr, /* point to index location of block L(i,k) */
int temp_nbrow, /* number of rows of source block L(i,k) */
int_t* lsub,
int_t* usub,
double* tempv,
int_t ** Ufstnz_br_ptr, double **Unzval_br_ptr,
gridinfo_t * grid)
{
#ifdef PI_DEBUG
printf ("A(%d,%d) goes to U block \n", ib, jb);
#endif
// TAU_STATIC_TIMER_START("SCATTER_U");
// TAU_STATIC_TIMER_START("SCATTER_UB");
int_t jj, i, fnz, rel;
int segsize;
double *ucol;
int_t ilst = FstBlockC (ib + 1);
int_t lib = LBi (ib, grid);
int_t *index = Ufstnz_br_ptr[lib];
/* Reinitialize the pointers to the beginning of the k-th column/row of
* the L/U factors.
* usub[] - index array for panel U(k,:)
*/
int_t iuip_lib, ruip_lib;
iuip_lib = BR_HEADER;
ruip_lib = 0;
int_t ijb = index[iuip_lib];
while (ijb < jb) { /* Search for destination block. */
ruip_lib += index[iuip_lib + 1];
// printf("supersize[%ld] \t:%ld \n",ijb,SuperSize( ijb ) );
iuip_lib += UB_DESCRIPTOR + SuperSize (ijb);
ijb = index[iuip_lib];
}
/* Skip descriptor. Now point to fstnz index of block U(i,j). */
iuip_lib += UB_DESCRIPTOR;
// tempv = bigV + (cum_nrow + cum_ncol*nbrow);
for (jj = 0; jj < nsupc; ++jj) {
segsize = klst - usub[iukp + jj];
fnz = index[iuip_lib++];
if (segsize) { /* Nonzero segment in U(k,j). */
ucol = &Unzval_br_ptr[lib][ruip_lib];
// printf("========Entering loop=========\n");
#if (_OPENMP>=201307)
#pragma omp simd
#endif
for (i = 0; i < temp_nbrow; ++i) {
rel = lsub[lptr + i] - fnz;
// printf("%d %d %d %d %d \n",lptr,i,fnz,temp_nbrow,nbrow );
// printf("hello ucol[%d] %d %d : \n",rel,lsub[lptr + i],fnz);
ucol[rel] -= tempv[i];
#ifdef PI_DEBUG
double zz = 0.0;
if (!(*(long *) &zz == *(long *) &tempv[i]))
printf ("(%d, %0.3e, %0.3e ) ", rel, ucol[rel] + tempv[i],
ucol[rel]);
//print triplets (location, old value, new value) when the values are nonzero
#endif
} /* for i = 0:temp_nbrow */
tempv += nbrow; /* Jump LDA to next column */
#ifdef PI_DEBUG
// printf("\n");
#endif
} /* if segsize */
ruip_lib += ilst - fnz;
} /* for jj = 0:nsupc */
#ifdef PI_DEBUG
// printf("\n");
#endif
// TAU_STATIC_TIMER_STOP("SCATTER_UB");
} /* dscatter_u */
/*Divide CPU-GPU dgemm work here*/
#ifdef PI_DEBUG
int Ngem = 2;
// int_t Ngem = 0;
int min_gpu_col = 6;
#else
// int_t Ngem = 0;
#endif
#ifdef GPU_ACC
void
gemm_division_cpu_gpu(
int* num_streams_used, /*number of streams that will be used */
int* stream_end_col, /*array holding last column blk for each partition */
int * ncpu_blks, /*Number of CPU dgemm blks */
/*input */
int nbrow, /*number of row in A matrix */
int ldu, /*number of k in dgemm */
int nstreams,
int* full_u_cols, /*array containing prefix sum of work load */
int num_blks /*Number of work load */
)
{
int Ngem = sp_ienv_dist(7); /*get_mnk_dgemm ();*/
int min_gpu_col = get_cublas_nb ();
// Ngem = 1000000000;
/*
Ideally the CPU-to-GPU dgemm split would approach 0:1 to hide the total
cost. However, GPU launch latency is roughly 20,000 ns, during which about
200,000 floating-point operations can be done on the CPU, so roughly
200,000/(2*nbrow*ldu) columns should be kept on the CPU to hide that
latency; hence Ngem ~= 200,000/2.
*/
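/*
Concretely: full_u_cols[] is a prefix sum of column counts, so the blocks up
to the first index where the running count exceeds Ngem/(nbrow*ldu) are kept
on the CPU, and the remaining columns are split across the GPU streams.
*/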
int i, j;
// {
// *num_streams_used=0;
// *ncpu_blks = num_blks;
// return;
// }
for (int i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
*ncpu_blks = 0;
/* Easy return 1: the number of columns is below the threshold */
if (full_u_cols[num_blks - 1] < (Ngem / (nbrow * ldu)) || num_blks == 1 )
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
#ifdef PI_DEBUG
printf ("full_u_cols[num_blks-1] %d %d \n",
full_u_cols[num_blks - 1], (Ngem / (nbrow * ldu)));
printf ("Early return \n");
#endif
return;
}
/* Easy return 2: no streams available */
if (nstreams == 0)
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
return;
}
/*find first block where count > Ngem */
for (i = 0; i < num_blks - 1; ++i) /*I can use binary search here */
{
if (full_u_cols[i + 1] > Ngem / (nbrow * ldu))
break;
}
*ncpu_blks = i + 1;
int_t cols_remain =
full_u_cols[num_blks - 1] - full_u_cols[*ncpu_blks - 1];
#ifdef PI_DEBUG
printf ("Remaining cols %d num_blks %d cpu_blks %d \n", cols_remain,
num_blks, *ncpu_blks);
#endif
if (cols_remain > 0)
{
*num_streams_used = 1; /* at least one stream will be used */
#ifdef PI_DEBUG
printf ("%d %d %d %d \n", full_u_cols[num_blks - 1],
full_u_cols[*ncpu_blks], *ncpu_blks, nstreams);
#endif
int_t FP_MIN = 200000 / (nbrow * ldu);
int_t cols_per_stream = SUPERLU_MAX (min_gpu_col, cols_remain / nstreams);
cols_per_stream = SUPERLU_MAX (cols_per_stream, FP_MIN);
#ifdef PI_DEBUG
printf ("cols_per_stream :\t%d\n", cols_per_stream);
#endif
int_t cutoff = cols_per_stream + full_u_cols[*ncpu_blks - 1];
for (int_t i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
j = *ncpu_blks;
for (i = 0; i < nstreams - 1; ++i)
{
int_t st = (i == 0) ? (*ncpu_blks) : stream_end_col[i - 1];
for (j = st; j < num_blks - 1; ++j)
{
#ifdef PI_DEBUG
printf ("i %d, j %d, %d %d ", i, j, full_u_cols[j + 1],
cutoff);
#endif
if (full_u_cols[j + 1] > cutoff)
{
#ifdef PI_DEBUG
printf ("cutoff met \n");
#endif
cutoff = cols_per_stream + full_u_cols[j];
stream_end_col[i] = j + 1;
*num_streams_used += 1;
j++;
break;
}
#ifdef PI_DEBUG
printf ("\n");
#endif
}
}
}
}
void
gemm_division_new (int * num_streams_used, /*number of streams that will be used */
int * stream_end_col, /*array holding last column blk for each partition */
int * ncpu_blks, /*Number of CPU dgemm blks */
/*input */
int nbrow, /*number of row in A matrix */
int ldu, /*number of k in dgemm */
int nstreams,
Ublock_info_t *Ublock_info, /*array containing prefix sum of work load */
int num_blks /*Number of work load */
)
{
int Ngem = sp_ienv_dist(7); /*get_mnk_dgemm ();*/
int min_gpu_col = get_cublas_nb ();
// Ngem = 1000000000;
/*
Ideally the CPU-to-GPU dgemm split would approach 0:1 to hide the total
cost. However, GPU launch latency is roughly 20,000 ns, during which about
200,000 floating-point operations can be done on the CPU, so roughly
200,000/(2*nbrow*ldu) columns should be kept on the CPU to hide that
latency; hence Ngem ~= 200,000/2.
*/
int_t i, j;
for (int i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
*ncpu_blks = 0;
/* Easy return 1: the number of columns is below the threshold */
if (Ublock_info[num_blks - 1].full_u_cols < (Ngem / (nbrow * ldu)) || num_blks == 1)
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
return;
}
/* Easy return 2: no streams available */
if (nstreams == 0)
{
*num_streams_used = 0;
*ncpu_blks = num_blks;
return;
}
/*find first block where count > Ngem */
for (i = 0; i < num_blks - 1; ++i) /*I can use binary search here */
{
if (Ublock_info[i + 1].full_u_cols > Ngem / (nbrow * ldu))
break;
}
*ncpu_blks = i + 1;
int_t cols_remain =
Ublock_info [num_blks - 1].full_u_cols - Ublock_info[*ncpu_blks - 1].full_u_cols;
if (cols_remain > 0)
{
*num_streams_used = 1; /* at least one stream will be used */
int_t FP_MIN = 200000 / (nbrow * ldu);
int_t cols_per_stream = SUPERLU_MAX (min_gpu_col, cols_remain / nstreams);
cols_per_stream = SUPERLU_MAX (cols_per_stream, FP_MIN);
int_t cutoff = cols_per_stream + Ublock_info[*ncpu_blks - 1].full_u_cols;
for (int_t i = 0; i < nstreams; ++i)
{
stream_end_col[i] = num_blks;
}
j = *ncpu_blks;
for (i = 0; i < nstreams - 1; ++i)
{
int_t st = (i == 0) ? (*ncpu_blks) : stream_end_col[i - 1];
for (j = st; j < num_blks - 1; ++j)
{
if (Ublock_info[j + 1].full_u_cols > cutoff)
{
cutoff = cols_per_stream + Ublock_info[j].full_u_cols;
stream_end_col[i] = j + 1;
*num_streams_used += 1;
j++;
break;
}
}
}
}
}
#endif /* defined GPU_ACC */
|
omp_bucket.c | /* Code originally from The Crazy Programmer https://www.thecrazyprogrammer.com/2017/02/bucket-sort-in-c.html
*
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "timer.h"
struct bucket
{
int count;
int* value;
};
int compareIntegers(const void* first, const void* second)
{
int x = *((int*)first), y = *((int*)second);
if (x == y)
{
return 0;
}
else if (x < y)
{
return -1;
}
else
{
return 1;
}
}
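/* Note: only three fixed buckets are used (<0, 0..10, >10). With input drawn
from rand(), nearly every value lands in bucket 2, so the sort effectively
degenerates to a single qsort over almost the whole array. */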
void bucketSort(int array[],int n)
{
struct bucket buckets[3];
int i, j, k;
#pragma omp parallel for shared(buckets)
for (i = 0; i < 3; i++)
{
buckets[i].count = 0;
buckets[i].value = (int*)malloc(sizeof(int) * n);
}
// Distributing elements in parallel races on buckets[b].count, so each
// slot index is claimed with an atomic capture before writing.
#pragma omp parallel for shared(array, buckets)
for (i = 0; i < n; i++)
{
int b, slot;
if (array[i] < 0)
b = 0;
else if (array[i] > 10)
b = 2;
else
b = 1;
#pragma omp atomic capture
slot = buckets[b].count++;
buckets[b].value[slot] = array[i];
}
for (k = 0, i = 0; i < 3; i++)
{
// now using quicksort to sort the elements of buckets
qsort(buckets[i].value, buckets[i].count, sizeof(int), &compareIntegers);
for (j = 0; j < buckets[i].count; j++)
{
array[k + j] = buckets[i].value[j];
}
k += buckets[i].count;
free(buckets[i].value);
}
}
int main(int argc, char *argv[]) {
int size;
if (argc < 2) {
fprintf(stderr, "usage: %s <size>\n", argv[0]);
return 1;
}
size = atoi(argv[1]);
printf("0: %d\n", size);
//int array[100] = { 5, -34, 10, 1, -42, 123, 2, 395, 5, 4, 1234, 7 };
int array[size];
for (int i = 0; i < size; i++) {
array[i] = rand();
}
int i = size, j, k, n;
n = i;
/*
printf("Before Sorting\n");
for (j = 0; j<i; j++)
{
printf("%d ", array[j]);
}
*/
StartTimer();
bucketSort(array, n);
double runtime = GetTimer();
printf("Total: %f s \n", runtime / 1000);
/*
*
*
printf("\n After Sorting\n");
for (k = 0; k<i; k++)
printf("%d ", array[k]);
*/
return 0;
}
|
second_implementation.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
#define BUFF_SIZE 50
#define MAXSIZE 100
#define MAXLINE 50
#define N_EXEC 100 //number of independently executed k-means runs
#define N_FOLD 10 //number of folds for cross validation
#define K_MAX 10 //max number of clusters
#define CHUNKSIZE 10
#define IMPL 2
char FILEPATH[64] = "data/iris_high.txt"; /* sized to hold the name copied in from argv */
int k = 2; // initial value of clusters
int threads, chunkn, chunkm;
struct data
{
int dim;
int atts;
float** data;
};
//structure that contains all SSE and centroids computed
struct history
{
float SSE;
float** centroids;
};
float calcSilhouette(float** dataset, int **clusters, float** centroids, int n, int m);
struct data loadDataset(char* fileName, char* dist);
void normalize(struct data* dataset);
void datasetSubSets(struct data dataset, int fold, struct data* trainingSet, struct data* testSet);
float mainAlgo(struct data training, struct data test, int flagFinal);
void kmeans (struct data structure, int numIte, float tol, struct history* recordStoria);
void copySubMatrix(float** centroids, float** dataset, int *ranNum, int m);
void randomIndexes(int *ranNum, int n);
void zeroClusters(int **clusters, int n);
void findClusters(float** dataset, int **clusters, float** centroids, int n, int m);
float calcSSE(float** dataset, int **clusters, float** centroids, int n, int m);
void freeArray(float **a, int n);
void copyMatrix(float **mat1, float **mat2, int row, int col);
void printData(struct data dataset);
void getRow(float **matrix, int row, float *array, int m);
float eucliDist(float *rec1, float *rec2, int m);
void findCentroids(float** centroids, int **clusters, float** dataset, int n, int m);
void printClusters(int **clusters, int n);
void printCentroids(float** centroids, int m);
void writeFile(float** data, int **clusters, int n, int m);
void freeArrayInt(int **a, int n);
int omp_thread_count();
//////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////
int main (int argc, char *argv[])
{
if (argc < 2) {
fprintf(stderr, "usage: %s <dataset name>\n", argv[0]);
return 1;
}
threads = omp_thread_count();
strcpy(FILEPATH, argv[1]);
char file[128] = "../data/";
strcat(file,argv[1]);
strcat(file,".csv");
double begin = omp_get_wtime();
double end;
struct data dataset = loadDataset(file, "\t");
chunkn = dataset.dim / threads;
chunkm = (dataset.atts / threads) + 1;
printf("\nCHUNKN: %d - CHUNKM: %d\n", chunkn, chunkm);
normalize(&dataset);
printf("DIM: %d\n", dataset.dim);
struct data trainingSet;
struct data testSet;
int bestk=2;
float sumSil[ K_MAX-1 ], appSSE, appSIL, SIL[ K_MAX-1 ];
for(k = 2; k<=K_MAX; k++) {
printf("\nAnalizing for k = %d", k);
sumSil[k-2] = 0;
//N_FOLD-fold cross validation
for(int fold=0; fold<N_FOLD; fold++) {
trainingSet.data = (float**) calloc (dataset.dim - (dataset.dim / N_FOLD), sizeof(float*));
testSet.data = (float**) calloc (dataset.dim / N_FOLD, sizeof(float*));
datasetSubSets(dataset, fold, &trainingSet, &testSet);
sumSil[k-2] +=mainAlgo(trainingSet, testSet, 0);
if(fold<N_FOLD-1){
freeArray(trainingSet.data, trainingSet.dim);
freeArray(testSet.data, testSet.dim);
}
}
sumSil[k-2] = sumSil[k-2]/N_FOLD;
//AIC[k-2] = 2*k + log10(sumSil[k-2]/testSet.dim); //Akaike criterion result
printf("\nSilhouette: %f",sumSil[k-2]);
//printf("\nAIC: %f, altro: %f", AIC[k-2], testSet.dim*log10(sumSil[k-2]/testSet.dim));
if(k==2)
appSIL = sumSil[k-2];
if(sumSil[k-2] > appSIL) {
bestk = k;
appSIL = sumSil[k-2];
}
freeArray(trainingSet.data, trainingSet.dim);
freeArray(testSet.data, testSet.dim);
end = omp_get_wtime();
double time_spent = (end - begin);
printf("\nTime from start: %lf sec \n------------------------", time_spent);
}
printf("\n best k is: %d with Silhuette: %f", bestk, appSIL);
// Set the number of clusters to the best one chosen above by the silhouette comparison
k = bestk;
mainAlgo(dataset, dataset, 1);
end = omp_get_wtime();
FILE* fd;
fd = fopen("tempi.txt", "a");
/* omp_get_num_threads() returns 1 outside a parallel region, so report the
precomputed thread count instead */
fprintf(fd,"\n%lf sec\t%d\t%s\t%d",(end - begin), threads, FILEPATH, IMPL);
fclose(fd);
printf("\nk_max= %d, Total time: %lf sec\n",K_MAX,(end - begin));
return 0;
}
//main algorithm
//flag is =1 only when final iteration is computed (in order to write out the results)
float mainAlgo(struct data training, struct data test, int flagFinal) {
struct history bestStoria;
float supportSSE;
int** bestClusters =(int**) calloc(test.dim, sizeof(int*));
bestStoria.centroids = (float**) malloc(k * sizeof(float*));
for(int i=0; i<test.dim; i++)
bestClusters[i] =(int*) calloc(k, sizeof(int));
for(int i=0; i<k; i++)
bestStoria.centroids[i] =(float*) malloc(training.atts *sizeof(float));
bestStoria.SSE = training.atts * training.dim;
struct history storia;
storia.centroids = (float**) calloc(k, sizeof(float*));
for(int i=0; i<k; i++) {
storia.centroids[i] =(float*) calloc(training.atts, sizeof(float));
}
srand( time(NULL) );
for(int i=0; i<N_EXEC; i++) {
kmeans(training, 5000, 0.001, &storia);
if(storia.SSE <= bestStoria.SSE) {
copyMatrix(bestStoria.centroids, storia.centroids, k, training.atts);
bestStoria.SSE = storia.SSE;
}
}
zeroClusters(bestClusters, test.dim); //reset best clusters matrix
findClusters(test.data, bestClusters, bestStoria.centroids, test.dim, test.atts);
//print last iteration results
if(flagFinal == 1) {
printCentroids(bestStoria.centroids, test.atts);
writeFile(test.data, bestClusters, test.dim, test.atts);
}
float SSEtrovato = calcSSE(test.data, bestClusters, bestStoria.centroids, test.dim, test.atts);
float silTrovata = calcSilhouette(test.data, bestClusters, bestStoria.centroids, test.dim, test.atts);
//printf("\nthread: %d", omp_get_thread_num());
freeArray(bestStoria.centroids, k);
freeArray(storia.centroids, k);
freeArrayInt(bestClusters, test.dim);
return silTrovata;
}
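/* Simplified silhouette: distances are measured against cluster centroids
rather than between individual records, so a(i) and b(i) are approximated
per cluster instead of per point. */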
float calcSilhouette(float** dataset, int **clusters, float** centroids, int n, int m){
float sum=0.0, supDataset[m], supCentroid[m], avgi[k], avge[k], minAvge = 10, a, b, max = 0, sil, meansil = 0;
int trovato = 0;
int ci = 0;
for(int ki=0; ki<k; ki++){
getRow(centroids, ki, supCentroid, m);
avgi[ki] = 0;
avge[ki] = 0; //avge must be zeroed before the accumulation below
max = 0;
for(int i=0;i<n;i++){
getRow(dataset, i, supDataset, m);
ci += clusters[i][ki];
avgi[ki] += eucliDist(supCentroid, supDataset, m) * clusters[i][ki];
if(trovato==0)
avge[ki] += eucliDist(supCentroid, supDataset, m) * (1 - clusters[i][ki]);
if(clusters[i][ki]==0) trovato = 1;
if(clusters[i][ki] == 0 && minAvge > eucliDist(supCentroid, supDataset, m)) minAvge = eucliDist(supCentroid, supDataset, m);
}
if(ci!=0)
avgi[ki] = avgi[ki] / ci;
trovato = 0;
ci=0;
}
float lowestAvge = minAvge, avgiMean=0;
for(int ki=0;ki<k;ki++){
if(lowestAvge>=avgi[ki]){
max = lowestAvge;
sil = 1-(avgi[ki]/lowestAvge);
}
else{
max = avgi[ki];
sil = (lowestAvge/avgi[ki])-1;
}
meansil += sil;
}
meansil = meansil / k;
return meansil;
}
void kmeans (struct data structure, int numIte, float tol, struct history* recordStoria)
{
int n, m, *ranNum;
n = structure.dim;
m = structure.atts;
ranNum = (int*) calloc(k, sizeof(int));
//generate k random indexes to start from
randomIndexes(ranNum, n);
float** centroids = (float**) calloc(k, sizeof(float*));
int** clusters =(int**) calloc(n, sizeof(int*));
for(int i=0; i<n; i++) {
clusters[i] =(int*) calloc(k, sizeof(int));
}
for(int i=0; i<k; i++) {
centroids[i] =(float*) calloc(m, sizeof(float));
}
//saving initial centroids
copySubMatrix(centroids, structure.data, ranNum, m);
/* Save the indexes that correspond to each of the k clusters.
Results are stored in a matrix whose columns are the cluster numbers
and whose rows correspond to the record indexes. */
zeroClusters(clusters, n); //set every cluster entry to 0
findClusters(structure.data, clusters, centroids, n, m);
int count = 0;
float newSSE, currSSE;
float** supCentroids = (float**) calloc(k, sizeof(float*));
for(int j = 0; j < k; j++)
supCentroids[j] = (float*) calloc(m, sizeof(float));
do {
currSSE = calcSSE(structure.data, clusters, centroids, n, m);
copyMatrix(supCentroids, centroids, k, m);
findCentroids(centroids, clusters, structure.data, n, m);
zeroClusters(clusters, n);
findClusters(structure.data,clusters,centroids, n, m);
newSSE = calcSSE(structure.data, clusters, centroids, n, m); //sum of square errors calculation
count++;
} while(count < numIte && ((currSSE-newSSE)/currSSE) > tol);
if(newSSE > currSSE) {
copyMatrix(centroids, supCentroids, k, m);
newSSE = currSSE;
}
copyMatrix(recordStoria->centroids, centroids, k, m);
recordStoria->SSE = newSSE;
freeArray(centroids,k);
freeArrayInt(clusters,n);
freeArray(supCentroids,k);
free(ranNum);
}
void printData(struct data dataset) {
printf("\n");
for(int i = 0; i < dataset.dim; i++) {
printf("%d\t", i + 1);
for(int j = 0; j < dataset.atts; j++) {
printf("%.2f\t", dataset.data[i][j]);
}
printf("\n");
}
}
void normalize(struct data* dataset) {
int i, j;
printf("Normalizing the data\n");
float max[dataset->atts];
// Look for max of each column
for(i = 0; i < dataset->dim; i++) {
for(j = 0; j < dataset->atts; j++) {
if(i == 0) {
max[j] = 0;
}
if(max[j] < dataset->data[i][j])
max[j] = dataset->data[i][j];
}
}
// Normalize the data by dividing each value by the max value of the column
for(i = 0; i < dataset->dim; i++) {
for(j = 0; j < dataset->atts; j++) {
dataset->data[i][j] = dataset->data[i][j] / max[j];
}
}
}
void datasetSubSets(struct data dataset, int fold, struct data* trainingSet, struct data* testSet) {
int init, end, apptr = 0, appte=0;
init = fold * (dataset.dim / N_FOLD);
end = ((fold + 1) * (dataset.dim / N_FOLD)) - 1;
trainingSet->dim = 0;
trainingSet->atts = dataset.atts;
testSet->dim = 0;
testSet->atts = dataset.atts;
for (int i = 0; i < dataset.dim; i++) {
if(i >= init && i <= end) {
testSet->data[appte] = (float*) calloc(dataset.atts, sizeof(float));
for(int u=0;u<dataset.atts;u++)
testSet->data[appte][u] = dataset.data[i][u];
appte++;
} else {
trainingSet->data[apptr] = (float*) calloc(dataset.atts, sizeof(float));
for(int u=0; u<dataset.atts; u++)
trainingSet->data[apptr][u] = dataset.data[i][u];
apptr++;
}
}
testSet->dim = appte;
trainingSet->dim = apptr;
}
struct data loadDataset(char* fileName, char* dist) {
FILE *file;
int max = MAXSIZE;
float* app;
float** data;
struct data dataset;
// Open file and check for I/O errors
file = fopen(fileName, "r");
if (file == NULL) exit(-1);
else printf("Loading from file: %s\n", fileName);
char buffer[MAXLINE];
// Count the number of attributes
if (fgets(buffer, sizeof(buffer), file)) {
// Get the number of attributes
sscanf(buffer, "%d", &dataset.atts);
printf("atts: %d\n", dataset.atts);
}
// Read the actual data from the file
dataset.dim = 0;
// Read line by line until there are more in the file
dataset.data = (float**) calloc (MAXSIZE, sizeof(float*));
while(fgets(buffer, sizeof(buffer), file)) {
dataset.data[dataset.dim] = (float*) calloc(dataset.atts, sizeof(float));
// Take the first token in the current line of data
char *headapp = strtok(buffer, dist);
int i = 0;
do {
// Get the token and convert it to float
dataset.data[dataset.dim][i] = atof(headapp);
// Get the next token
headapp = strtok(NULL, dist);
i++;
} while(headapp != NULL);
dataset.dim++;
if(dataset.dim >= max) {
max += MAXSIZE;
/* grow the array of row pointers (float*), not a flat float buffer */
dataset.data = (float**) realloc(dataset.data, sizeof(float*) * max);
}
}
return dataset;
}
void freeArray(float **a, int n) {
for (int i = 0; i < n; ++i) {
free(a[i]);
}
free(a);
}
void freeArrayInt(int **a, int n) {
for (int i = 0; i < n; ++i) {
free(a[i]);
}
free(a);
}
void copyMatrix(float **mat1, float **mat2, int row, int col){ //mat1 dest, mat2 source
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
mat1[i][j] = mat2[i][j];
}
}
}
//copy the dataset records that correspond to the initial centroids
void copySubMatrix(float **centroids, float** dataset, int *ranNum, int m) {
for(int i=0; i<k; i++)
{
for(int j=0; j<m; j++)
centroids[i][j] = dataset[ ranNum[i] ][j];
}
}
float calcSSE(float** dataset, int **clusters, float** centroids, int n, int m){
float sum=0.0, supDataset[m], supCentroid[m];
for(int ki=0; ki<k; ki++){
getRow(centroids, ki, supCentroid, m);
#pragma omp parallel for private(supDataset) reduction(+:sum) schedule(static, chunkn)
for(int i=0;i<n;i++){
getRow(dataset, i, supDataset, m);
sum += eucliDist(supCentroid, supDataset, m) * clusters[i][ki];
}
}
return sum;
}
void printClusters(int **clusters, int n){
printf("\t\t");
for(int ki=0;ki<k;ki++){
printf("k%d\t",ki);
}
for(int i=0;i<n;i++){
printf("\nrecord%d:\t",i);
for(int j=0;j<k;j++){
printf("%d\t",clusters[i][j]);
}
}
printf("\n");
}
void findCentroids(float** centroids, int **clusters, float** dataset, int n, int m) {
int elemCluster=0;
float record[m];
//reset array
for(int ki=0; ki<k; ki++) {
for(int p=0; p<m; p++) {
record[p] = 0;
}
for(int i=0; i<n; i++) {
if(clusters[i][ki]!=0) {
elemCluster++;
for(int j=0; j<m; j++)
record[j] += dataset[i][j];
}
}
#pragma omp parallel for schedule(static, chunkm) if(m >= omp_get_num_threads())
for(int p=0; p<m; p++) {
if(elemCluster!=0)
record[p] = record[p]/elemCluster;
else
record[p]=0;
centroids[ki][p] = record[p];
}
elemCluster = 0;
}
}
void printCentroids(float** centroids, int m) {
int p, i;
for(i = 0; i < k; i++) {
printf("\ncentroide cluster %d esimo: ",i);
for(p=0;p<m;p++){
printf("%.2f,",centroids[i][p]);
}
printf("\n");
}
}
void zeroClusters(int **clusters, int n){
for(int i=0;i<n;i++){
for(int j=0;j<k;j++){
clusters[i][j] = 0;
}
}
}
//assigns each dataset record to the nearest centroid, which corresponds to its cluster
void findClusters(float** dataset, int **clusters, float **centroids, int n, int m){
int salvaK=0;
float supCentroid[m], supDataset[m], dist=0, lowerDist;
#pragma omp parallel for schedule(static, chunkn) private(lowerDist, dist, salvaK, supCentroid, supDataset)
for(int i=0;i<n;i++){
lowerDist = m;
// lowerDist = m because data is normalized, so the max dist between 2 records is the number of attributes
for(int ki=0;ki<k;ki++){
getRow(centroids, ki, supCentroid, m); //extract a row from the centroid matrix
getRow(dataset, i, supDataset, m); //extract a row from the dataset matrix
dist = eucliDist(supCentroid, supDataset, m); //computing the euclidean distance
if(dist<=lowerDist)
{
lowerDist = dist;
salvaK = ki;
}
}
clusters[i][salvaK] = 1;
}
}
void getRow(float **matrix, int row, float *array, int m){
for(int j = 0; j < m; j++){
array[j] = matrix[row][j];
}
}
/*generates k random indexes, which will refer to the k initial centroids*/
void randomIndexes(int *ranNum, int n){
int i;
for(i=0;i<k;i++){
ranNum[i] = rand()%n;
}
}
/*receives two records and computes their Euclidean distance; continuous data required*/
float eucliDist(float *rec1, float *rec2, int m){
float sum = 0.0;
for(int i=0;i<m;i++){
sum += ((rec1[i]-rec2[i]))*((rec1[i]-rec2[i]));
}
return sqrt(sum);
}
void writeFile(float** data ,int **clusters, int n, int m) {
FILE* fd;
char output[128] = "out/"; /* sized to hold "out/" + FILEPATH + ".txt" */
strcat(output,FILEPATH);
strcat(output,".txt");
fd = fopen(output, "w");
for(int i = 0; i < n; i++) {
fprintf(fd, "%d", i);
for(int j = 0; j < k; j++) {
fprintf(fd, "\t%d", clusters[i][j]);
}
fprintf(fd,"\n");
}
fclose(fd);
}
int omp_thread_count() {
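// Portable thread count: each thread contributes 1 under a sum reduction.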
int n = 0;
#pragma omp parallel reduction(+:n)
n += 1;
printf("\nNUM THREADS: %d\n", n);
return n;
}
|
subCycleHex3D.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
extern "C" void FUNC(subCycleStrongCubatureVolumeHex3D)(const int & Nelements,
const int * __restrict__ elementList,
const dfloat * __restrict__ cubD,
const dfloat * __restrict__ cubInterpT,
const int & offset,
const int & cubatureOffset,
const int & NUoffset,
const dfloat * __restrict__ invLumpedMassMatrix,
const dfloat * __restrict__ BdivW,
const dfloat & c0,
const dfloat & c1,
const dfloat & c2,
const dfloat * __restrict__ conv,
const dfloat * __restrict__ Ud,
dfloat * __restrict__ NU) {
// (phi, U.grad Ud)
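// Per element: interpolate Ud to the cubature grid one direction at a time
// (r, s, then t), differentiate with the 1-D cubature operator in each
// direction, dot with the extrapolated convecting velocity, and project the
// result back to the GLL grid.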
dfloat r_c[3] = {c0, c1,c2};
dfloat s_cubD[p_cubNq][p_cubNq];
dfloat s_cubInterpT[p_Nq][p_cubNq];
dfloat s_U[p_cubNq][p_cubNq];
dfloat s_V[p_cubNq][p_cubNq];
dfloat s_W[p_cubNq][p_cubNq];
dfloat s_Ud[p_cubNq][p_cubNq];
dfloat s_Vd[p_cubNq][p_cubNq];
dfloat s_Wd[p_cubNq][p_cubNq];
dfloat s_Ud1[p_Nq][p_cubNq];
dfloat s_Vd1[p_Nq][p_cubNq];
dfloat s_Wd1[p_Nq][p_cubNq];
dfloat r_U2[p_cubNq][p_cubNq][p_cubNq], r_V2[p_cubNq][p_cubNq][p_cubNq], r_W2[p_cubNq][p_cubNq][p_cubNq];
dfloat r_Ud[p_cubNq][p_cubNq][p_cubNq], r_Vd[p_cubNq][p_cubNq][p_cubNq], r_Wd[p_cubNq][p_cubNq][p_cubNq];
#pragma unroll
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
const int id = i + j * p_cubNq;
if (id < p_Nq * p_cubNq) {
s_cubInterpT[j][i] = cubInterpT[id];
}
s_cubD[j][i] = cubD[id];
}
}
#ifdef __NEKRS__OMP__
#pragma omp parallel for private(s_U, s_V, s_W, s_Ud, s_Vd, s_Wd, s_Ud1, s_Vd1, s_Wd1, r_U2, r_V2, r_W2, r_Ud, r_Vd, r_Wd)
#endif
for (int e = 0; e < Nelements; ++e) {
const int element = elementList[e];
#pragma unroll
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
r_Ud[j][i][k] = 0;
r_Vd[j][i][k] = 0;
r_Wd[j][i][k] = 0;
}
}
}
#pragma unroll
for (int c = 0; c < p_Nq; ++c) {
#pragma unroll
for (int b = 0; b < p_Nq; ++b) {
#pragma unroll
for (int a = 0; a < p_Nq; ++a) {
// this can be improved
const int id = element * p_Np + c * p_Nq * p_Nq + b * p_Nq + a;
s_Ud[b][a] = Ud[id + 0 * offset];
s_Vd[b][a] = Ud[id + 1 * offset];
s_Wd[b][a] = Ud[id + 2 * offset];
}
}
// interpolate in 'r'
#pragma unroll
for (int b = 0; b < p_Nq; ++b) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat Ud1 = 0, Vd1 = 0, Wd1 = 0;
#pragma unroll
for (int a = 0; a < p_Nq; ++a) {
dfloat Iia = s_cubInterpT[a][i];
Ud1 += Iia * s_Ud[b][a];
Vd1 += Iia * s_Vd[b][a];
Wd1 += Iia * s_Wd[b][a];
}
s_Ud1[b][i] = Ud1;
s_Vd1[b][i] = Vd1;
s_Wd1[b][i] = Wd1;
}
}
// interpolate in 's'
#pragma unroll
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat Ud2 = 0, Vd2 = 0, Wd2 = 0;
// interpolate in b
#pragma unroll
for (int b = 0; b < p_Nq; ++b) {
dfloat Ijb = s_cubInterpT[b][j];
Ud2 += Ijb * s_Ud1[b][i];
Vd2 += Ijb * s_Vd1[b][i];
Wd2 += Ijb * s_Wd1[b][i];
}
// interpolate in c progressively
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
dfloat Ikc = s_cubInterpT[c][k];
r_Ud[j][i][k] += Ikc * Ud2;
r_Vd[j][i][k] += Ikc * Vd2;
r_Wd[j][i][k] += Ikc * Wd2;
}
}
}
}
// Uhat * dr
#pragma unroll p_cubNq
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat Udr = 0;
dfloat Vdr = 0;
dfloat Wdr = 0;
#pragma unroll
for (int n = 0; n < p_cubNq; ++n) {
dfloat Din = s_cubD[i][n];
Udr += Din * r_Ud[j][n][k];
Vdr += Din * r_Vd[j][n][k];
Wdr += Din * r_Wd[j][n][k];
}
dfloat Uhat = 0.0;
const int id = element * p_cubNp + k * p_cubNq * p_cubNq + j * p_cubNq + i;
#pragma unroll
for (int s = 0; s < p_nEXT; ++s) {
const int s_offset = s * p_NVfields * cubatureOffset;
const dfloat coeff = r_c[s];
Uhat += coeff * conv[id + 0 * cubatureOffset + s_offset];
}
// U*dUdx + V*dUdy + W*dUdz = (U*(drdx*dUdr+dsdx*dUds+dtdx*dUdt) + V*(drdy*dUdr ..))
// I_f^t*(J_f*C_f^t)*G_f*\hat{D}_f*I_f*u
r_U2[j][i][k] = Uhat * Udr;
r_V2[j][i][k] = Uhat * Vdr;
r_W2[j][i][k] = Uhat * Wdr;
}
}
}
// Vhat * ds
#pragma unroll p_cubNq
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
dfloat Uds = 0;
dfloat Vds = 0;
dfloat Wds = 0;
#pragma unroll
for (int n = 0; n < p_cubNq; ++n) {
dfloat Djn = s_cubD[j][n];
Uds += Djn * r_Ud[n][i][k];
Vds += Djn * r_Vd[n][i][k];
Wds += Djn * r_Wd[n][i][k];
}
dfloat Vhat = 0.0;
const int id = element * p_cubNp + k * p_cubNq * p_cubNq + j * p_cubNq + i;
#pragma unroll
for (int s = 0; s < p_nEXT; ++s) {
const int s_offset = s * p_NVfields * cubatureOffset;
const dfloat coeff = r_c[s];
Vhat += coeff * conv[id + 1 * cubatureOffset + s_offset];
}
// U*dUdx + V*dUdy + W*dUdz = (U*(drdx*dUdr+dsdx*dUds+dtdx*dUdt) + V*(drdy*dUdr ..))
// I_f^t*(J_f*C_f^t)*G_f*\hat{D}_f*I_f*u
r_U2[j][i][k] += Vhat * Uds;
r_V2[j][i][k] += Vhat * Vds;
r_W2[j][i][k] += Vhat * Wds;
}
}
}
// What * dt
#pragma unroll p_cubNq
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat Udt = 0;
dfloat Vdt = 0;
dfloat Wdt = 0;
#pragma unroll
for (int n = 0; n < p_cubNq; ++n) {
dfloat Dkn = s_cubD[k][n];
Udt += Dkn * r_Ud[j][i][n];
Vdt += Dkn * r_Vd[j][i][n];
Wdt += Dkn * r_Wd[j][i][n];
}
dfloat What = 0.0;
const int id = element * p_cubNp + k * p_cubNq * p_cubNq + j * p_cubNq + i;
#pragma unroll
for (int s = 0; s < p_nEXT; ++s) {
const int s_offset = s * p_NVfields * cubatureOffset;
const dfloat coeff = r_c[s];
What += coeff * conv[id + 2 * cubatureOffset + s_offset];
}
// U*dUdx + V*dUdy + W*dUdz = (U*(drdx*dUdr+dsdx*dUds+dtdx*dUdt) + V*(drdy*dUdr ..))
// I_f^t*(J_f*C_f^t)*G_f*\hat{D}_f*I_f*u
r_U2[j][i][k] += What * Udt;
r_V2[j][i][k] += What * Vdt;
r_W2[j][i][k] += What * Wdt;
}
}
}
// now project back in t
#pragma unroll
for (int c = 0; c < p_Nq; ++c) {
#pragma unroll
for (int j = 0; j < p_cubNq; ++j) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat rhsU = 0, rhsV = 0, rhsW = 0;
#pragma unroll
for (int k = 0; k < p_cubNq; ++k) {
dfloat Ikc = s_cubInterpT[c][k];
rhsU += Ikc * r_U2[j][i][k];
rhsV += Ikc * r_V2[j][i][k];
rhsW += Ikc * r_W2[j][i][k];
}
s_U[j][i] = rhsU;
s_V[j][i] = rhsV;
s_W[j][i] = rhsW;
}
}
#pragma unroll
for (int b = 0; b < p_Nq; ++b) {
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat rhsU = 0, rhsV = 0, rhsW = 0;
#pragma unroll
for (int j = 0; j < p_cubNq; ++j) {
dfloat Ijb = s_cubInterpT[b][j];
rhsU += Ijb * s_U[j][i];
rhsV += Ijb * s_V[j][i];
rhsW += Ijb * s_W[j][i];
}
s_Ud[b][i] = rhsU;
s_Vd[b][i] = rhsV;
s_Wd[b][i] = rhsW;
}
}
#pragma unroll
for (int b = 0; b < p_Nq; ++b) {
#pragma unroll
for (int a = 0; a < p_Nq; ++a) {
dfloat rhsU = 0, rhsV = 0, rhsW = 0;
#pragma unroll
for (int i = 0; i < p_cubNq; ++i) {
dfloat Iia = s_cubInterpT[a][i];
rhsU += Iia * s_Ud[b][i];
rhsV += Iia * s_Vd[b][i];
rhsW += Iia * s_Wd[b][i];
}
const int id = element * p_Np + c * p_Nq * p_Nq + b * p_Nq + a;
dfloat invLMM = p_MovingMesh ? 0.0 : invLumpedMassMatrix[id];
dfloat bdivw = 0.0;
if (p_MovingMesh) {
#pragma unroll
for (int s = 0; s < p_nEXT; s++) {
const dfloat coeff = r_c[s];
invLMM += coeff * invLumpedMassMatrix[id + s * offset];
bdivw += coeff * BdivW[id + s * offset];
}
}
NU[id + 0 * offset + NUoffset] = (rhsU - bdivw * Ud[id + 0 * offset]) * invLMM;
NU[id + 1 * offset + NUoffset] = (rhsV - bdivw * Ud[id + 1 * offset]) * invLMM;
NU[id + 2 * offset + NUoffset] = (rhsW - bdivw * Ud[id + 2 * offset]) * invLMM;
}
}
}
}
}
|
compute_sentinel_refl.c | /******************************************************************************
FILE: compute_sentinel_refl.c
PURPOSE: Contains functions for handling the Sentinel-2 TOA reflectance and
surface reflectance corrections.
PROJECT: Land Satellites Data System Science Research and Development (LSRD)
at the USGS EROS
LICENSE TYPE: NASA Open Source Agreement Version 1.3
NOTES:
******************************************************************************/
//#define USE_GCTP 1
/* GAIL uncomment to use the GCTP library */
#include "lasrc.h"
#include "time.h"
#include "aero_interp.h"
#include "poly_coeff.h"
#include "read_level1_qa.h"
#include "read_level2_qa.h"
#ifndef USE_GCTP
#include "utmtodeg.h"
#endif
#define WRITE_TAERO 1
/******************************************************************************
MODULE: read_sentinel_toa_refl
PURPOSE: Reads the input TOA Sentinel reflectance bands and converts all bands
to 10m resolution.
RETURN VALUE:
Type = int
Value Description
----- -----------
ERROR Error reading the input TOA reflectance
SUCCESS No errors encountered
PROJECT: Land Satellites Data System Science Research and Development (LSRD)
at the USGS EROS
NOTES:
******************************************************************************/
int read_sentinel_toa_refl
(
Input_t *input, /* I: input structure for the Sentinel product */
Espa_internal_meta_t *xml_metadata,
/* I: XML metadata structure */
float **toaband /* O: output TOA reflectance values (unscaled) */
)
{
char errmsg[STR_SIZE]; /* error message */
char FUNC_NAME[] = "read_sentinel_toa_refl"; /* function name */
int i; /* looping variable for pixels */
int ib; /* looping variable for input bands */
int nlines10 = -99; /* number of lines in 10m reflectance bands */
int nsamps10 = -99; /* number of samps in 10m reflectance bands */
int nlines20 = -99; /* number of lines in 20m reflectance bands */
int nsamps20 = -99; /* number of samps in 20m reflectance bands */
int nlines60 = -99; /* number of lines in 60m reflectance bands */
int nsamps60 = -99; /* number of samps in 60m reflectance bands */
uint16 *tmp_band = NULL; /* array for input 10m image data for a single
band, nlines10 x nsamps10 */
uint16 *tmp20_band = NULL; /* array for input 20m image data for a single
band, nlines20 x nsamps20 */
uint16 *tmp60_band = NULL; /* array for input 60m image data for a single
band, nlines60 x nsamps60 */
Espa_band_meta_t *bmeta = xml_metadata->band;
/* pointer to the array of band metadata */
/* Determine the 10m, 20m, and 60m number of lines and samples */
for (ib = 0; ib < xml_metadata->nbands; ib++)
{
/* Use band 2 for the representative 10m band */
if (!strcmp (xml_metadata->band[ib].name, "B02"))
{
nlines10 = xml_metadata->band[ib].nlines;
nsamps10 = xml_metadata->band[ib].nsamps;
}
/* Use band 5 for the representative 20m band */
else if (!strcmp (xml_metadata->band[ib].name, "B05"))
{
nlines20 = xml_metadata->band[ib].nlines;
nsamps20 = xml_metadata->band[ib].nsamps;
}
/* Use band 1 for the representative 60m band */
else if (!strcmp (xml_metadata->band[ib].name, "B01"))
{
nlines60 = xml_metadata->band[ib].nlines;
nsamps60 = xml_metadata->band[ib].nsamps;
}
}
/* Make sure they were found and are valid */
if (nlines10 == -99 || nsamps10 == -99)
{
sprintf (errmsg, "Error obtaining the nlines/nsamps for 10m band");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
if (nlines20 == -99 || nsamps20 == -99)
{
sprintf (errmsg, "Error obtaining the nlines/nsamps for 20m band");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
if (nlines60 == -99 || nsamps60 == -99)
{
sprintf (errmsg, "Error obtaining the nlines/nsamps for 60m band");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Allocate memory for the 10m, 20m, and 60m band data */
tmp_band = calloc (nlines10 * nsamps10, sizeof (uint16));
if (tmp_band == NULL)
{
sprintf (errmsg, "Error allocating memory for temporary 10m band");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
tmp20_band = calloc (nlines20 * nsamps20, sizeof (uint16));
if (tmp20_band == NULL)
{
sprintf (errmsg, "Error allocating memory for temporary 20m band");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
tmp60_band = calloc (nlines60 * nsamps60, sizeof (uint16));
if (tmp60_band == NULL)
{
sprintf (errmsg, "Error allocating memory for temporary 60m band");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Loop through the Sentinel-2 bands */
for (ib = DNS_BAND1; ib <= DNS_BAND12; ib++)
{
switch (ib)
{
/* 10m bands read as-is (4) */
case DNS_BAND2:
case DNS_BAND3:
case DNS_BAND4:
case DNS_BAND8:
/* Read the input band data */
if (get_input_refl_lines (input, ib, 0, nlines10, nsamps10,
tmp_band) != SUCCESS)
{
sprintf (errmsg, "Error reading Sentinel TOA 10m band %d",
ib+1);
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
break;
/* 20m bands convert to 10m (6) */
case DNS_BAND5:
case DNS_BAND6:
case DNS_BAND7:
case DNS_BAND8A:
case DNS_BAND11:
case DNS_BAND12:
/* Read the input band data */
if (get_input_refl_lines (input, ib, 0, nlines20, nsamps20,
tmp20_band) != SUCCESS)
{
sprintf (errmsg, "Error reading Sentinel TOA 20m band %d",
ib+1);
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Convert to 10m */
if (convert_to_10m (nlines20, nsamps20, nlines10, nsamps10,
tmp20_band, tmp_band) != SUCCESS)
{
sprintf (errmsg, "Error converting 20m band %d", ib+1);
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
break;
/* 60m bands convert to 10m (3, but skipping bands 9&10) */
case DNS_BAND1:
#ifdef PROC_ALL_BANDS
case DNS_BAND9:
case DNS_BAND10:
#endif
/* Read the input band data */
if (get_input_refl_lines (input, ib, 0, nlines60, nsamps60,
tmp60_band) != SUCCESS)
{
sprintf (errmsg, "Error reading Sentinel TOA 60m band %d",
ib+1);
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Convert to 10m */
if (convert_to_10m (nlines60, nsamps60, nlines10, nsamps10,
tmp60_band, tmp_band) != SUCCESS)
{
sprintf (errmsg, "Error converting 60m band %d", ib+1);
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
break;
} /* switch ib */
/* Unscale the data */
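/* reflectance = DN * scale_factor + add_offset, using the band metadata */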
for (i = 0; i < nlines10 * nsamps10; i++)
{
/* If this is fill, leave the value as-is for masking later. Otherwise
unscale the data. */
if (tmp_band[i] == bmeta[ib].fill_value)
toaband[ib][i] = tmp_band[i];
else
toaband[ib][i] = tmp_band[i] * bmeta[ib].scale_factor +
bmeta[ib].add_offset;
}
} /* for ib */
/* Free the memory */
free (tmp_band);
free (tmp20_band);
free (tmp60_band);
return (SUCCESS);
}
/******************************************************************************
MODULE: compute_sentinel_sr_refl
PURPOSE: Computes the surface reflectance for all the Sentinel-2 reflectance
bands.
RETURN VALUE:
Type = int
Value Description
----- -----------
ERROR Error computing the reflectance
SUCCESS No errors encountered
PROJECT: Land Satellites Data System Science Research and Development (LSRD)
at the USGS EROS
NOTES:
1. Initializes the variables and data arrays from the lookup table and
auxiliary files.
2. The tauray array was originally read in from a static ASCII file, but it is
now hardcoded to avoid re-reading the file each time. This file was
generated (like many of the other auxiliary input tables) by running 6S and
storing the coefficients.
3. Aerosols are retrieved for all non-fill pixels. If the aerosol fails the
model residual or NDVI test, then the pixel is flagged as water. All water
pixels are run through a water-specific aerosol retrieval. If the model
residual fails, then that pixel is marked as failed aerosol retrieval. Any
pixel that failed retrieval is then interpolated using an average of the
clear (valid land pixel aerosols) and water (valid water pixel aerosols).
Those final aerosol values are used for the surface reflectance corrections.
4. Cloud-based QA information is not processed in this algorithm.
******************************************************************************/
int compute_sentinel_sr_refl
(
Input_t *input, /* I: input structure for the Sentinel-2 product */
Espa_internal_meta_t *xml_metadata,
/* I: XML metadata structure */
char *xml_infile, /* I: input XML filename */
uint16 *qaband, /* O: QA band generated for image, nlines x nsamps */
int nlines, /* I: number of lines in reflectance, thermal bands */
int nsamps, /* I: number of samps in reflectance, thermal bands */
float pixsize, /* I: pixel size for the reflectance bands */
float **toaband, /* I: unscaled TOA reflectance bands, nlines x nsamps */
float **sband, /* O: output unscaled SR bands, nlines x nsamps */
uint16 *out_band, /* I: allocated array for writing scaled output */
float xts, /* I: scene center solar zenith angle (deg) */
float xmus, /* I: cosine of solar zenith angle */
bool use_orig_aero, /* I: use the original aerosol handling if specified,
o/w use the semi-empirical approach */
char *anglehdf, /* I: angle HDF filename */
char *intrefnm, /* I: intrinsic reflectance filename */
char *transmnm, /* I: transmission filename */
char *spheranm, /* I: spherical albedo filename */
char *cmgdemnm, /* I: climate modeling grid DEM filename */
char *rationm, /* I: ratio averages filename */
char *auxnm /* I: auxiliary filename for ozone and water vapor */
)
{
char errmsg[STR_SIZE]; /* error message */
char FUNC_NAME[] = "compute_sentinel_sr_refl"; /* function name */
Sat_t sat = input->meta.sat; /* satellite */
int retval; /* return status */
int i, j; /* looping variable for pixels */
int ib; /* looping variable for input bands */
int iband; /* current band */
int curr_pix; /* current pixel in 1D arrays of nlines x nsamps */
int iline; /* current line in the 6x6 window for atm corr */
int isamp; /* current sample in the 6x6 window for atm corr */
int ew_line; /* ending line in the 6x6 window for atm corr */
int ew_samp; /* ending sample in the 6x6 window for atm corr */
int curr_win_pix; /* current pixel in the 6x6 window for atm corr */
int pix_count; /* count of valid pixels in the NxN (SAERO_WINDOW) window */
long npixels; /* number of pixels to process */
bool is_fill; /* flag for whether the current pixel is fill */
float tmpf; /* temporary floating point value */
float rotoa; /* top of atmosphere reflectance */
float roslamb; /* lambertian surface reflectance */
float tgo; /* other gaseous transmittance (tgog * tgoz) */
float roatm; /* intrinsic atmospheric reflectance */
float ttatmg; /* total atmospheric transmission */
float satm; /* atmosphere spherical albedo */
float tgo_x_roatm; /* variable for tgo * roatm */
float tgo_x_ttatmg; /* variable for tgo * ttatmg */
float xrorayp; /* reflectance of the atmosphere due to molecular
(Rayleigh) scattering */
float erelc[NSR_BANDS]; /* band ratio variable for refl bands */
float troatm[NSR_BANDS]; /* atmospheric reflectance table for refl bands */
int iband1; /* band index (zero-based) */
float raot; /* AOT reflectance */
/* raot values for three different eps values */
float residual; /* model residual */
float residual1, residual2, residual3;
/* residuals for 3 different eps values */
float rsurf; /* surface reflectance */
float corf; /* aerosol impact (higher values represent high
aerosol) */
float ros1,ros4,ros5; /* surface reflectance for bands 1, 4, and 5 */
#ifndef _OPENMP
int curr_tmp_percent; /* percentage for current line */
int tmp_percent; /* current percentage for printing status */
#endif
float lat, lon; /* pixel lat, long location */
int lcmg, scmg; /* line/sample index for the CMG */
int lcmg1, scmg1; /* line+1/sample+1 index for the CMG */
float u, v; /* fractional line/sample offsets within the CMG cell */
float one_minus_u; /* 1.0 - u */
float one_minus_v; /* 1.0 - v */
float one_minus_u_x_one_minus_v; /* (1.0 - u) * (1.0 - v) */
float one_minus_u_x_v; /* (1.0 - u) * v */
float u_x_one_minus_v; /* u * (1.0 - v) */
float u_x_v; /* u * v */
float ndwi_th1, ndwi_th2; /* values for NDWI calculations */
float xcmg, ycmg; /* x/y location for CMG */
float xndwi; /* calculated NDWI value */
int uoz11, uoz12, uoz21, uoz22; /* ozone at line,samp; line, samp+1;
line+1, samp; and line+1, samp+1 */
float pres11, pres12, pres21, pres22; /* pressure at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
float wv11, wv12, wv21, wv22; /* water vapor at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
uint8 *ipflag = NULL; /* QA flag to assist with aerosol interpolation,
nlines x nsamps */
float *twvi = NULL; /* interpolated water vapor value,
nlines x nsamps */
float *tozi = NULL; /* interpolated ozone value, nlines x nsamps */
float *tp = NULL; /* interpolated pressure value, nlines x nsamps */
float *taero = NULL; /* aerosol values for each pixel, nlines x nsamps */
float *teps = NULL; /* angstrom coeff for each pixel, nlines x nsamps */
Espa_band_meta_t *bmeta = xml_metadata->band;
/* pointer to the array of band metadata */
/* Vars for forward/inverse mapping space */
Geoloc_t *space = NULL; /* structure for geolocation information */
Space_def_t space_def; /* structure to define the space mapping */
#ifdef USE_GCTP
Img_coord_float_t img; /* coordinate in line/sample space */
Geo_coord_t geo; /* coordinate in lat/long space */
#endif
/* Lookup table variables */
float eps; /* angstrom coefficient */
float eps1, eps2, eps3; /* eps values for three runs */
float xtv; /* observation zenith angle (deg) */
float xmuv; /* cosine of observation zenith angle */
float xfi; /* azimuthal difference between the sun and
observation angle (deg) */
float cosxfi; /* cosine of azimuthal difference */
float xtsstep; /* solar zenith step value */
float xtsmin; /* minimum solar zenith value */
float xtvstep; /* observation step value */
float xtvmin; /* minimum observation value */
float *rolutt = NULL; /* intrinsic reflectance table
[NSR_BANDS x NPRES_VALS x NAOT_VALS x NSOLAR_VALS] */
float *transt = NULL; /* transmission table
[NSR_BANDS x NPRES_VALS x NAOT_VALS x NSUNANGLE_VALS] */
float *sphalbt = NULL; /* spherical albedo table
[NSR_BANDS x NPRES_VALS x NAOT_VALS] */
float *normext = NULL; /* aerosol extinction coefficient at the current
wavelength (normalized at 550nm)
[NSR_BANDS x NPRES_VALS x NAOT_VALS] */
float *tsmax = NULL; /* maximum scattering angle table
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *tsmin = NULL; /* minimum scattering angle table
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *nbfi = NULL; /* number of azimuth angles
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *nbfic = NULL; /* cumulative number of azimuth angles
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float *ttv = NULL; /* view angle table
[NVIEW_ZEN_VALS x NSOLAR_ZEN_VALS] */
float tts[22]; /* sun angle table */
int32 indts[22]; /* index for sun angle table */
int iaots; /* index for AOTs */
/* Atmospheric correction coefficient variables (semi-empirical approach) */
float tgo_arr[NREFL_BANDS]; /* per-band other gaseous transmittance */
float roatm_arr[NREFL_BANDS][NAOT_VALS]; /* per band AOT vals for roatm */
float ttatmg_arr[NREFL_BANDS][NAOT_VALS]; /* per band AOT vals for ttatmg */
float satm_arr[NREFL_BANDS][NAOT_VALS]; /* per band AOT vals for satm */
float roatm_coef[NREFL_BANDS][NCOEF]; /* per band poly coeffs for roatm */
float ttatmg_coef[NREFL_BANDS][NCOEF]; /* per band poly coeffs for ttatmg */
float satm_coef[NREFL_BANDS][NCOEF]; /* per band poly coeffs for satm */
float normext_p0a3_arr[NREFL_BANDS]; /* per band normext[iband][0][3] */
int roatm_iaMax[NREFL_BANDS];
int ia; /* looping variable for AOTs */
int iaMaxTemp; /* max temp for current AOT level */
/* Auxiliary file variables */
int16 *dem = NULL; /* CMG DEM data array [DEM_NBLAT x DEM_NBLON] */
int16 *andwi = NULL; /* avg NDWI [RATIO_NBLAT x RATIO_NBLON] */
int16 *sndwi = NULL; /* standard NDWI [RATIO_NBLAT x RATIO_NBLON] */
int16 *ratiob1 = NULL; /* mean band1 ratio [RATIO_NBLAT x RATIO_NBLON] */
int16 *ratiob2 = NULL; /* mean band2 ratio [RATIO_NBLAT x RATIO_NBLON] */
int16 *ratiob7 = NULL; /* mean band7 ratio [RATIO_NBLAT x RATIO_NBLON] */
int16 *intratiob1 = NULL; /* intercept band1 ratio,
RATIO_NBLAT x RATIO_NBLON */
int16 *intratiob2 = NULL; /* intercept band2 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *intratiob7 = NULL; /* intercept band7 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *slpratiob1 = NULL; /* slope band1 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *slpratiob2 = NULL; /* slope band2 ratio
RATIO_NBLAT x RATIO_NBLON */
int16 *slpratiob7 = NULL; /* slope band7 ratio
RATIO_NBLAT x RATIO_NBLON */
uint16 *wv = NULL; /* water vapor values [CMG_NBLAT x CMG_NBLON] */
uint8 *oz = NULL; /* ozone values [CMG_NBLAT x CMG_NBLON] */
float raot550nm; /* nearest input value of AOT */
float uoz = 0.0; /* total column ozone */
float uwv = 0.0; /* total column water vapor (precipitable water vapor) */
float pres = 0.0; /* surface pressure */
float rb1; /* band ratio 1 (unscaled) */
float rb2; /* band ratio 2 (unscaled) */
float slpr11, slpr12, slpr21, slpr22; /* band ratio slope at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
float intr11, intr12, intr21, intr22; /* band ratio intercept at line,samp;
line, samp+1; line+1, samp; and line+1, samp+1 */
float slprb1, slprb2, slprb7; /* interpolated band ratio slope values for
band ratios 1, 2, 7 */
float intrb1, intrb2, intrb7; /* interpolated band ratio intercept values
for band ratios 1, 2, 7 */
int ratio_pix11; /* pixel location for ratio products [lcmg][scmg] */
int ratio_pix12; /* pixel location for ratio products [lcmg][scmg+1] */
int ratio_pix21; /* pixel location for ratio products [lcmg+1][scmg] */
int ratio_pix22; /* pixel location for ratio products [lcmg+1][scmg+1] */
int cmg_pix11; /* pixel location for CMG/DEM products [lcmg][scmg] */
int cmg_pix12; /* pixel location for CMG/DEM products [lcmg][scmg+1] */
int cmg_pix21; /* pixel location for CMG/DEM products [lcmg+1][scmg] */
int cmg_pix22; /* pixel location for CMG/DEM products [lcmg+1][scmg+1] */
/* Variables for finding the eps that minimizes the residual */
double xa, xb, xc, xd, xe, xf; /* coefficients */
double coefa, coefb; /* coefficients */
float epsmin; /* eps which minimizes the residual */
float resepsmin; /* residual eps which minimizes residual */
/* Output file info */
time_t mytime; /* timing variable */
Output_t *sr_output = NULL; /* output structure and metadata for the SR
product */
Envi_header_t envi_hdr; /* output ENVI header information */
char envi_file[STR_SIZE]; /* ENVI filename */
char *cptr = NULL; /* pointer to the file extension */
/* Table constants */
float aot550nm[NAOT_VALS] = /* AOT look-up table */
{0.01, 0.05, 0.10, 0.15, 0.20, 0.30, 0.40, 0.60, 0.80, 1.00, 1.20,
1.40, 1.60, 1.80, 2.00, 2.30, 2.60, 3.00, 3.50, 4.00, 4.50, 5.00};
float tpres[NPRES_VALS] = /* surface pressure table */
{1050.0, 1013.0, 900.0, 800.0, 700.0, 600.0, 500.0};
/* Atmospheric correction variables */
/* Look up table for atmospheric and geometric quantities. The tauray values
come from tauray-ldcm/msi.ASC and the oz, wv, og variables come from
gascoef-modis/msi.ASC. */
/* NOTE: coefficients for bands 9 and 10 may have been removed from these
arrays since those bands might not be processed */
#ifdef PROC_ALL_BANDS
/* Process all bands if turned on */
float tauray[NSRS_BANDS] = /* molecular optical thickness
coefficients -- produced by running 6S */
{0.23432, 0.15106, 0.09102, 0.04535, 0.03584, 0.02924, 0.02338, 0.01847,
0.01560, 0.01092, 0.00243, 0.00128, 0.00037};
double oztransa[NSRS_BANDS] = /* ozone transmission coeff */
{-0.00264691, -0.0272572, -0.0986512, -0.0500348, -0.0204295,
-0.0108641, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001};
double wvtransa[NSRS_BANDS] = /* water vapor transmission coeff */
{2.29849e-27, 2.29849e-27, 0.000777307, 0.00361051, 0.0141249,
0.0137067, 0.00410217, 0.0285871, 0.000390755, 0.00001, 0.01,
0.000640155, 0.018006};
double wvtransb[NSRS_BANDS] = /* water vapor transmission coeff */
{0.999742, 0.999742, 0.891099, 0.754895, 0.75596, 0.763497, 0.74117,
0.578722, 0.900899, 0.45818, 1.0, 0.943712, 0.647517};
double ogtransa1[NSRS_BANDS] = /* other gases transmission coeff */
{4.91586e-20, 4.91586e-20, 4.91586e-20, 4.91586e-20, 5.3367e-06,
4.91586e-20, 9.03583e-05, 1.64109e-09, 1.90458e-05, 4.91586e-20,
7.62429e-06, 0.0212751, 0.0243065};
double ogtransb0[NSRS_BANDS] = /* other gases transmission coeff */
{0.000197019, 0.000197019, 0.000197019, 0.000197019, -0.980313,
0.000197019, 0.0265393, 1.E-10, 0.0322844, 0.000197019, 0.000197019,
0.000197019, 0.000197019};
double ogtransb1[NSRS_BANDS] = /* other gases transmission coeff */
{9.57011e-16, 9.57011e-16, 9.57011e-16, 9.57011e-16, 1.33639,
9.57011e-16, 0.0532256, 1.E-10, -0.0219907, 9.57011e-16, -0.216849,
0.0116062, 0.0604312};
#else
/* Skip bands 9 and 10 as default for ESPA */
float tauray[NSRS_BANDS] = /* molecular optical thickness
coefficients -- produced by running 6S */
{0.23432, 0.15106, 0.09102, 0.04535, 0.03584, 0.02924, 0.02338, 0.01847,
0.01560, 0.00128, 0.00037};
double oztransa[NSRS_BANDS] = /* ozone transmission coeff */
{-0.00264691, -0.0272572, -0.0986512, -0.0500348, -0.0204295,
-0.0108641, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001};
double wvtransa[NSRS_BANDS] = /* water vapor transmission coeff */
{2.29849e-27, 2.29849e-27, 0.000777307, 0.00361051, 0.0141249,
0.0137067, 0.00410217, 0.0285871, 0.000390755, 0.000640155, 0.018006};
double wvtransb[NSRS_BANDS] = /* water vapor transmission coeff */
{0.999742, 0.999742, 0.891099, 0.754895, 0.75596, 0.763497, 0.74117,
0.578722, 0.900899, 0.943712, 0.647517};
double ogtransa1[NSRS_BANDS] = /* other gases transmission coeff */
{4.91586e-20, 4.91586e-20, 4.91586e-20, 4.91586e-20, 5.3367e-06,
4.91586e-20, 9.03583e-05, 1.64109e-09, 1.90458e-05, 0.0212751,
0.0243065};
double ogtransb0[NSRS_BANDS] = /* other gases transmission coeff */
{0.000197019, 0.000197019, 0.000197019, 0.000197019, -0.980313,
0.000197019, 0.0265393, 1.E-10, 0.0322844, 0.000197019, 0.000197019};
double ogtransb1[NSRS_BANDS] = /* other gases transmission coeff */
{9.57011e-16, 9.57011e-16, 9.57011e-16, 9.57011e-16, 1.33639,
9.57011e-16, 0.0532256, 1.E-10, -0.0219907, 0.0116062, 0.0604312};
#endif
#ifdef WRITE_TAERO
FILE *aero_fptr=NULL; /* file pointer for aerosol files */
#endif
/* Start processing */
mytime = time(NULL);
printf ("Start surface reflectance corrections: %s", ctime(&mytime));
#ifdef PROC_ALL_BANDS
printf ("All Sentinel-2 bands will be processed, including bands 9 and 10, "
"which is not the default.\n");
#endif
/* Allocate memory for the many arrays needed to do the surface reflectance
computations */
npixels = nlines * nsamps;
retval = sentinel_memory_allocation_sr (nlines, nsamps, &ipflag, &twvi,
&tozi, &tp, &taero, &teps, &dem, &andwi, &sndwi, &ratiob1, &ratiob2,
&ratiob7, &intratiob1, &intratiob2, &intratiob7, &slpratiob1,
&slpratiob2, &slpratiob7, &wv, &oz, &rolutt, &transt, &sphalbt,
&normext, &tsmax, &tsmin, &nbfic, &nbfi, &ttv);
if (retval != SUCCESS)
{
sprintf (errmsg, "Error allocating memory for the data arrays needed "
"for Sentinel surface reflectance calculations.");
error_handler (false, FUNC_NAME, errmsg);
return (ERROR);
}
/* Initialize the geolocation space applications */
if (!get_geoloc_info (xml_metadata, &space_def))
{
sprintf (errmsg, "Getting the space definition from the XML file");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
#ifdef USE_GCTP
space = setup_mapping (&space_def);
if (space == NULL)
{
sprintf (errmsg, "Setting up the geolocation mapping");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
#endif
/* Initialize the look up tables and atmospheric correction variables.
view zenith initialized to scene center (xtv)
azimuthal difference between sun and obs angle initialize to difference
at scene center (xfi)
surface pressure is initialized to the pressure at the center of the
scene (using the DEM) (pres)
water vapor is initialized to the value at the center of the scene (uwv)
ozone is initialized to the value at the center of the scene (uoz) */
retval = init_sr_refl (nlines, nsamps, input, &space_def, space, anglehdf,
intrefnm, transmnm, spheranm, cmgdemnm, rationm, auxnm, &eps, &iaots,
&xtv, &xmuv, &xfi, &cosxfi, &raot550nm, &pres, &uoz, &uwv, &xtsstep,
&xtsmin, &xtvstep, &xtvmin, tsmax, tsmin, tts, ttv, indts, rolutt,
transt, sphalbt, normext, nbfic, nbfi, dem, andwi, sndwi, ratiob1,
ratiob2, ratiob7, intratiob1, intratiob2, intratiob7, slpratiob1,
slpratiob2, slpratiob7, wv, oz);
if (retval != SUCCESS)
{
sprintf (errmsg, "Error initializing the lookup tables and "
"atmospheric correction variables.");
error_handler (false, FUNC_NAME, errmsg);
return (ERROR);
}
/* Loop through all the reflectance bands and perform atmospheric
corrections based on climatology */
mytime = time(NULL);
printf ("Performing atmospheric corrections for each Sentinel reflectance "
"band ... %s", ctime(&mytime)); fflush(stdout);
/* Flag a pixel as fill only when all of its bands contain the fill value.
Pixels used to be flagged as fill if any band was fill, but the S2
values for the non-visible bands are often 0. */
for (i = 0; i < npixels; i++)
{
/* Initialize to true and break out if any band is not fill */
is_fill = true;
for (ib = 0; ib <= SRS_BAND12; ib++)
{
if (toaband[ib][i] != bmeta[ib].fill_value)
{
/* No need to look any further */
is_fill = false;
break;
}
} /* end for ib */
/* If this is fill then mask it as such in the various outputs */
if (is_fill)
{
qaband[i] |= (1 << ESPA_L1_DESIGNATED_FILL_BIT);
for (ib = 0; ib <= SRS_BAND12; ib++)
sband[ib][i] = FILL_VALUE;
}
} /* for i */
/* rotoa is not defined for the atmcorlamb2 call, which is ok, but the
roslamb value is not valid upon output. Just set it to 0.0 to be
consistent. */
rotoa = 0.0;
raot550nm = 0.05;
eps = -1.0;
for (ib = 0; ib <= SRS_BAND12; ib++)
{
printf (" Band %s\n", SENTINEL_BANDNAME[ib]); fflush(stdout);
#ifdef PROC_ALL_BANDS
/* Process all bands if turned on */
/* Get the parameters for the atmospheric correction */
if (ib != SRS_BAND9) /* skip the water vapor band */
{
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus, xmuv, xfi,
cosxfi, raot550nm, ib, pres, tpres, aot550nm, rolutt, transt,
xtsstep, xtsmin, xtvstep, xtvmin, sphalbt, normext, tsmax,
tsmin, nbfic, nbfi, tts, indts, ttv, uoz, uwv, tauray,
ogtransa1, ogtransb0, ogtransb1, wvtransa, wvtransb, oztransa,
rotoa, &roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian atmospheric correction "
"type 2.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
}
else
{
/* Use default values for band 9, water vapor band */
tgo = 1.0;
roatm = 0.0;
ttatmg = 1.0;
satm = 0.0;
}
#else
/* Skip bands 9 and 10 as default for ESPA */
/* Get the parameters for the atmospheric correction */
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus, xmuv, xfi,
cosxfi, raot550nm, ib, pres, tpres, aot550nm, rolutt, transt,
xtsstep, xtsmin, xtvstep, xtvmin, sphalbt, normext, tsmax,
tsmin, nbfic, nbfi, tts, indts, ttv, uoz, uwv, tauray,
ogtransa1, ogtransb0, ogtransb1, wvtransa, wvtransb, oztransa,
rotoa, &roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian atmospheric correction "
"type 2.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
#endif
tgo_x_roatm = tgo * roatm;
tgo_x_ttatmg = tgo * ttatmg;
/* Perform atmospheric corrections for reflectance bands */
#ifdef _OPENMP
#pragma omp parallel for private (i, roslamb)
#endif
for (i = 0; i < npixels; i++)
{
/* If this pixel is not fill then handle the atmospheric
correction */
if (!level1_qa_is_fill (qaband[i]))
{
/* Apply the atmospheric corrections (ignoring the Rayleigh
scattering component and water vapor), and store the
scaled value for further corrections. (NOTE: the full
computations are in atmcorlamb2) */
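/* The two statements below invert the Lambertian coupling equation
rho_toa = tgo * (roatm + ttatmg * rho_s / (1 - satm * rho_s)) for the
surface reflectance rho_s: with A = rho_toa - tgo*roatm this gives
rho_s = A / (tgo*ttatmg + satm*A), needing one division per pixel. */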
roslamb = toaband[ib][i] - tgo_x_roatm;
roslamb /= tgo_x_ttatmg + satm * roslamb;
sband[ib][i] = roslamb;
}
} /* end for i */
} /* for ib */
printf ("\n");
/* Start the retrieval of atmospheric correction parameters for each band */
mytime = time(NULL);
printf ("Starting retrieval of atmospheric correction parameters ... %s",
ctime(&mytime)); fflush(stdout);
/* Get the coefficients for the semi-empirical atmospheric correction */
if (!use_orig_aero)
{
mytime = time(NULL);
printf ("Obtaining the coefficients for the semi-empirical approach "
"... %s", ctime(&mytime));
for (ib = 0; ib <= SRS_BAND12; ib++)
{
/* Get the parameters for the atmospheric correction */
/* rotoa is not defined for this call, which is ok, but the
roslamb value is not valid upon output. Just set it to 0.0 to
be consistent. */
normext_p0a3_arr[ib] = normext[ib * NPRES_VALS * NAOT_VALS + 0 + 3];
/* normext[ib][0][3]; */
rotoa = 0.0;
eps = -1.0;
for (ia = 0; ia < NAOT_VALS; ia++)
{
raot550nm = aot550nm[ia];
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus, xmuv,
xfi, cosxfi, raot550nm, ib, pres, tpres, aot550nm, rolutt,
transt, xtsstep, xtsmin, xtvstep, xtvmin, sphalbt, normext,
tsmax, tsmin, nbfic, nbfi, tts, indts, ttv, uoz, uwv,
tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa, wvtransb,
oztransa, rotoa, &roslamb, &tgo, &roatm, &ttatmg, &satm,
&xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian atmospheric "
"correction type 2 for band %d.", ib);
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
/* Store the AOT-related variables for use in the atmospheric
corrections */
roatm_arr[ib][ia] = roatm;
ttatmg_arr[ib][ia] = ttatmg;
satm_arr[ib][ia] = satm;
}
/* Store the band-related variables for use in the atmospheric
corrections. tgo and xrorayp are the same for each AOT, so just
save the last set for this band. */
tgo_arr[ib] = tgo;
}
/* Setup the 3rd order polynomial coefficients for the semi-empirical
approach in the aerosol inversion */
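/* These per-band cubics in AOT are what subaeroret_new and
atmcorlamb2_new evaluate later, replacing the full lookup-table
interpolation performed by subaeroret/atmcorlamb2 in the original
approach with a cheap polynomial evaluation per pixel. */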
for (ib = 0; ib <= SRS_BAND12; ib++)
{
/* Determine the maximum AOT index */
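/* roatm is expected to increase with AOT; find the last index at
which successive roatm values still grow by more than ESPA_EPSILON,
and fit the roatm polynomial only over that increasing range */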
iaMaxTemp = 1;
for (ia = 1; ia < NAOT_VALS; ia++)
{
if (ia == NAOT_VALS-1)
iaMaxTemp = NAOT_VALS-1;
if ((roatm_arr[ib][ia] - roatm_arr[ib][ia-1]) > ESPA_EPSILON)
continue;
else
{
iaMaxTemp = ia-1;
break;
}
}
/* Get the polynomial coefficients for roatm */
roatm_iaMax[ib] = iaMaxTemp;
get_3rd_order_poly_coeff (aot550nm, roatm_arr[ib], iaMaxTemp,
roatm_coef[ib]);
/* Get the polynomial coefficients for ttatmg */
get_3rd_order_poly_coeff (aot550nm, ttatmg_arr[ib], NAOT_VALS,
ttatmg_coef[ib]);
/* Get the polynomial coefficients for satm */
get_3rd_order_poly_coeff (aot550nm, satm_arr[ib], NAOT_VALS,
satm_coef[ib]);
}
} /* if !use_orig_aero */
/* If using the original aerosol approach, some auxiliary data need to be
interpolated for every pixel so they are available for the final aerosol
correction */
if (use_orig_aero)
{
mytime = time(NULL);
printf ("Interpolating the auxiliary data ... %s", ctime(&mytime));
#if defined(_OPENMP) && defined(USE_GCTP)
#pragma omp parallel for private (i, j, curr_pix, img, geo, lat, lon, xcmg, ycmg, lcmg, scmg, lcmg1, scmg1, u, v, one_minus_u, one_minus_v, one_minus_u_x_one_minus_v, one_minus_u_x_v, u_x_one_minus_v, u_x_v, cmg_pix11, cmg_pix12, cmg_pix21, cmg_pix22, wv11, wv12, wv21, wv22, uoz11, uoz12, uoz21, uoz22, pres11, pres12, pres21, pres22)
#elif defined(_OPENMP)
#pragma omp parallel for private (i, j, curr_pix, lat, lon, xcmg, ycmg, lcmg, scmg, lcmg1, scmg1, u, v, one_minus_u, one_minus_v, one_minus_u_x_one_minus_v, one_minus_u_x_v, u_x_one_minus_v, u_x_v, cmg_pix11, cmg_pix12, cmg_pix21, cmg_pix22, wv11, wv12, wv21, wv22, uoz11, uoz12, uoz21, uoz22, pres11, pres12, pres21, pres22)
#endif
for (i = 0; i < nlines; i++)
{
curr_pix = i * nsamps;
for (j = 0; j < nsamps; j++, curr_pix++)
{
/* If this pixel is fill, do not process */
if (level1_qa_is_fill (qaband[curr_pix]))
continue;
/* Get the lat/long for the current pixel */
#ifdef USE_GCTP
img.l = i - 0.5;
img.s = j + 0.5;
img.is_fill = false;
if (!from_space (space, &img, &geo))
{
sprintf (errmsg, "Mapping line/sample (%d, %d) to "
"geolocation coords", i, j);
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
lat = geo.lat * RAD2DEG;
lon = geo.lon * RAD2DEG;
#else
utmtodeg (&space_def, i, j, &lat, &lon);
#endif
/* Use that lat/long to determine the line/sample in the
CMG-related lookup tables, using the center of the UL
pixel. Note, we are basically making sure the line/sample
combination falls within -90, 90 and -180, 180 global climate
data boundaries. However, the source code below uses lcmg+1
and scmg+1, which for some scenes may wrap around the
dateline or the poles. Thus we need to wrap the CMG data
around to the beginning of the array. */
/* Each CMG pixel is 0.05 x 0.05 degrees. Use the center of the
pixel for each calculation. Negative latitude values should
be the largest line values in the CMG grid. Negative
longitude values should be the smallest sample values in the
CMG grid. */
/* The line/sample calculation from the x/ycmg values are not
rounded. The interpolation of the value using line+1 and
sample+1 are based on the truncated numbers, therefore
rounding up is not appropriate. */
ycmg = (89.975 - lat) * 20.0; /* vs / 0.05 */
xcmg = (179.975 + lon) * 20.0; /* vs / 0.05 */
lcmg = (int) ycmg;
scmg = (int) xcmg;
/* Handle the edges of the lat/long values in the CMG grid */
if (lcmg < 0)
lcmg = 0;
else if (lcmg >= CMG_NBLAT)
lcmg = CMG_NBLAT - 1;
if (scmg < 0)
scmg = 0;
else if (scmg >= CMG_NBLON)
scmg = CMG_NBLON - 1;
/* If the current CMG pixel is at the edge of the CMG array,
then allow the next pixel for interpolation to wrap around
the array */
if (scmg >= CMG_NBLON-1) /* 180 degrees so wrap around */
scmg1 = 0;
else
scmg1 = scmg + 1;
if (lcmg >= CMG_NBLAT-1) /* -90 degrees, so set the next pixel
to also use -90 */
lcmg1 = lcmg;
else
lcmg1 = lcmg + 1;
/* Determine the four CMG pixels to be used for the current
Landsat pixel */
cmg_pix11 = lcmg * CMG_NBLON + scmg;
cmg_pix12 = lcmg * CMG_NBLON + scmg1;
cmg_pix21 = lcmg1 * CMG_NBLON + scmg;
cmg_pix22 = lcmg1 * CMG_NBLON + scmg1;
/* Get the water vapor pixels. If the water vapor value is
fill (=0), then use it as-is. */
wv11 = wv[cmg_pix11];
wv12 = wv[cmg_pix12];
wv21 = wv[cmg_pix21];
wv22 = wv[cmg_pix22];
/* Get the ozone pixels. If the ozone value is fill (=0), then
use a default value of 120. */
uoz11 = oz[cmg_pix11];
if (uoz11 == 0)
uoz11 = 120;
uoz12 = oz[cmg_pix12];
if (uoz12 == 0)
uoz12 = 120;
uoz21 = oz[cmg_pix21];
if (uoz21 == 0)
uoz21 = 120;
uoz22 = oz[cmg_pix22];
if (uoz22 == 0)
uoz22 = 120;
/* Get the surface pressure from the global DEM. Set to 1013.0
(sea level) if the DEM is fill (= -9999), which is likely
ocean. The dimensions on the DEM array is the same as that
of the CMG arrays. Use the current pixel locations already
calculated. */
if (dem[cmg_pix11] != -9999)
pres11 = 1013.0 * exp (-dem[cmg_pix11] * ONE_DIV_8500);
else
pres11 = 1013.0;
if (dem[cmg_pix12] != -9999)
pres12 = 1013.0 * exp (-dem[cmg_pix12] * ONE_DIV_8500);
else
pres12 = 1013.0;
if (dem[cmg_pix21] != -9999)
pres21 = 1013.0 * exp (-dem[cmg_pix21] * ONE_DIV_8500);
else
pres21 = 1013.0;
if (dem[cmg_pix22] != -9999)
pres22 = 1013.0 * exp (-dem[cmg_pix22] * ONE_DIV_8500);
else
pres22 = 1013.0;
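/* The surface pressure (hPa) above comes from the barometric formula
P = 1013 * exp(-z / 8500), i.e. sea-level pressure scaled by an
exponential with an ~8.5 km scale height (ONE_DIV_8500 = 1/8500),
where z is the DEM elevation in meters */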
/* Determine the fractional difference between the integer
location and floating point pixel location to be used for
interpolation */
u = (ycmg - lcmg);
v = (xcmg - scmg);
one_minus_u = 1.0 - u;
one_minus_v = 1.0 - v;
one_minus_u_x_one_minus_v = one_minus_u * one_minus_v;
one_minus_u_x_v = one_minus_u * v;
u_x_one_minus_v = u * one_minus_v;
u_x_v = u * v;
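/* These four products are the standard bilinear interpolation weights:
(1-u)(1-v) for (lcmg,scmg), (1-u)v for (lcmg,scmg1), u(1-v) for
(lcmg1,scmg), and uv for (lcmg1,scmg1). The same weights are applied
to the water vapor, ozone, and pressure values below. */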
/* Interpolate water vapor, and unscale */
twvi[curr_pix] = wv11 * one_minus_u_x_one_minus_v +
wv12 * one_minus_u_x_v +
wv21 * u_x_one_minus_v +
wv22 * u_x_v;
twvi[curr_pix] = twvi[curr_pix] * 0.01; /* vs / 100 */
/* Interpolate ozone, and unscale */
tozi[curr_pix] = uoz11 * one_minus_u_x_one_minus_v +
uoz12 * one_minus_u_x_v +
uoz21 * u_x_one_minus_v +
uoz22 * u_x_v;
tozi[curr_pix] = tozi[curr_pix] * 0.0025; /* vs / 400 */
/* Interpolate surface pressure */
tp[curr_pix] = pres11 * one_minus_u_x_one_minus_v +
pres12 * one_minus_u_x_v +
pres21 * u_x_one_minus_v +
pres22 * u_x_v;
} /* end for j */
} /* end for i */
} /* if use_orig_aero */
/* Compute some EPS values */
eps1 = LOW_EPS;
eps2 = MOD_EPS;
eps3 = HIGH_EPS;
xa = (eps1 * eps1) - (eps3 * eps3);
xd = (eps2 * eps2) - (eps3 * eps3);
xb = eps1 - eps3;
xe = eps2 - eps3;
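/* The three (eps, residual) samples are fit with a parabola
r(eps) = coefa*eps^2 + coefb*eps + const. Differencing against the
eps3 sample eliminates the constant term, leaving the 2x2 system
coefa*xa + coefb*xb = residual1 - residual3
coefa*xd + coefb*xe = residual2 - residual3
whose right-hand sides (xc and xf) are computed per pixel in the
aerosol inversion loop below and solved by Cramer's rule. */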
/* Start the aerosol inversion */
mytime = time(NULL);
printf ("Aerosol Inversion using %d x %d aerosol window ... %s",
SAERO_WINDOW, SAERO_WINDOW, ctime(&mytime));
#if defined(_OPENMP) && defined(USE_GCTP)
#pragma omp parallel for private (i, j, curr_pix, img, geo, lat, lon, xcmg, ycmg, lcmg, scmg, lcmg1, scmg1, u, v, one_minus_u, one_minus_v, one_minus_u_x_one_minus_v, one_minus_u_x_v, u_x_one_minus_v, u_x_v, ratio_pix11, ratio_pix12, ratio_pix21, ratio_pix22, rb1, rb2, slpr11, slpr12, slpr21, slpr22, intr11, intr12, intr21, intr22, slprb1, slprb2, slprb7, intrb1, intrb2, intrb7, xndwi, ndwi_th1, ndwi_th2, iline, isamp, curr_win_pix, pix_count, ew_line, ew_samp, ib, iband, iband1, iaots, pres, uoz, uwv, retval, eps, residual, residual1, residual2, residual3, raot, xc, xf, coefa, coefb, epsmin, resepsmin, corf, rotoa, raot550nm, roslamb, tgo, roatm, ttatmg, satm, xrorayp, ros1, ros4, ros5, erelc, troatm)
#elif defined(_OPENMP)
#pragma omp parallel for private (i, j, curr_pix, lat, lon, xcmg, ycmg, lcmg, scmg, lcmg1, scmg1, u, v, one_minus_u, one_minus_v, one_minus_u_x_one_minus_v, one_minus_u_x_v, u_x_one_minus_v, u_x_v, ratio_pix11, ratio_pix12, ratio_pix21, ratio_pix22, rb1, rb2, slpr11, slpr12, slpr21, slpr22, intr11, intr12, intr21, intr22, slprb1, slprb2, slprb7, intrb1, intrb2, intrb7, xndwi, ndwi_th1, ndwi_th2, iline, isamp, curr_win_pix, pix_count, ew_line, ew_samp, ib, iband, iband1, iaots, pres, uoz, uwv, retval, eps, residual, residual1, residual2, residual3, raot, xc, xf, coefa, coefb, epsmin, resepsmin, corf, rotoa, raot550nm, roslamb, tgo, roatm, ttatmg, satm, xrorayp, ros1, ros4, ros5, erelc, troatm)
#endif
#ifndef _OPENMP
tmp_percent = 0;
#endif
for (i = 0; i < nlines; i+=SAERO_WINDOW)
{
#ifndef _OPENMP
/* update status, but not if multi-threaded */
curr_tmp_percent = 100 * i / nlines;
if (curr_tmp_percent > tmp_percent)
{
tmp_percent = curr_tmp_percent;
if (tmp_percent % 10 == 0)
{
printf ("%d%% ", tmp_percent);
fflush (stdout);
}
}
#endif
curr_pix = i * nsamps;
for (j = 0; j < nsamps; j+=SAERO_WINDOW, curr_pix+=SAERO_WINDOW)
{
/* If this pixel is fill */
if (level1_qa_is_fill (qaband[curr_pix]))
{
ipflag[curr_pix] = (1 << IPFLAG_FILL);
continue;
}
/* Get the lat/long for the current pixel (which may not be the
center of the aerosol window), for the center of that pixel */
#ifdef USE_GCTP
img.l = i - 0.5;
img.s = j + 0.5;
img.is_fill = false;
if (!from_space (space, &img, &geo))
{
sprintf (errmsg, "Mapping line/sample (%d, %d) to "
"geolocation coords", i, j);
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
lat = geo.lat * RAD2DEG;
lon = geo.lon * RAD2DEG;
#else
utmtodeg (&space_def, i, j, &lat, &lon);
#endif
/* Use that lat/long to determine the line/sample in the
CMG-related lookup tables, using the center of the UL
pixel. Note, we are basically making sure the line/sample
combination falls within -90, 90 and -180, 180 global climate
data boundaries. However, the source code below uses lcmg+1
and scmg+1, which for some scenes may wrap around the
dateline or the poles. Thus we need to wrap the CMG data
around to the beginning of the array. */
/* Each CMG pixel is 0.05 x 0.05 degrees. Use the center of the
pixel for each calculation. Negative latitude values should
be the largest line values in the CMG grid. Negative
longitude values should be the smallest sample values in the
CMG grid. */
/* The line/sample calculation from the x/ycmg values are not
rounded. The interpolation of the value using line+1 and
sample+1 are based on the truncated numbers, therefore
rounding up is not appropriate. */
ycmg = (89.975 - lat) * 20.0; /* vs / 0.05 */
xcmg = (179.975 + lon) * 20.0; /* vs / 0.05 */
lcmg = (int) ycmg;
scmg = (int) xcmg;
/* Handle the edges of the lat/long values in the CMG grid */
if (lcmg < 0)
lcmg = 0;
else if (lcmg >= CMG_NBLAT)
lcmg = CMG_NBLAT - 1;
if (scmg < 0)
scmg = 0;
else if (scmg >= CMG_NBLON)
scmg = CMG_NBLON - 1;
/* If the current CMG pixel is at the edge of the CMG array, then
allow the next pixel for interpolation to wrap around the
array */
if (scmg >= CMG_NBLON - 1) /* 180 degrees so wrap around */
scmg1 = 0;
else
scmg1 = scmg + 1;
if (lcmg >= CMG_NBLAT - 1) /* -90 degrees, so set the next pixel
to also use -90. */
lcmg1 = lcmg;
else
lcmg1 = lcmg + 1;
/* Determine the fractional difference between the integer location
and floating point pixel location to be used for interpolation */
u = (ycmg - lcmg);
v = (xcmg - scmg);
one_minus_u = 1.0 - u;
one_minus_v = 1.0 - v;
one_minus_u_x_one_minus_v = one_minus_u * one_minus_v;
one_minus_u_x_v = one_minus_u * v;
u_x_one_minus_v = u * one_minus_v;
u_x_v = u * v;
/* Determine the band ratios and slope/intercept */
ratio_pix11 = lcmg * RATIO_NBLON + scmg;
ratio_pix12 = lcmg * RATIO_NBLON + scmg1;
ratio_pix21 = lcmg1 * RATIO_NBLON + scmg;
ratio_pix22 = lcmg1 * RATIO_NBLON + scmg1;
rb1 = ratiob1[ratio_pix11] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix11] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix11] = 0;
slpratiob2[ratio_pix11] = 0;
slpratiob7[ratio_pix11] = 0;
intratiob1[ratio_pix11] = 550;
intratiob2[ratio_pix11] = 600;
intratiob7[ratio_pix11] = 2000;
}
else if (sndwi[ratio_pix11] < 200)
{
slpratiob1[ratio_pix11] = 0;
slpratiob2[ratio_pix11] = 0;
slpratiob7[ratio_pix11] = 0;
intratiob1[ratio_pix11] = ratiob1[ratio_pix11];
intratiob2[ratio_pix11] = ratiob2[ratio_pix11];
intratiob7[ratio_pix11] = ratiob7[ratio_pix11];
}
rb1 = ratiob1[ratio_pix12] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix12] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix12] = 0;
slpratiob2[ratio_pix12] = 0;
slpratiob7[ratio_pix12] = 0;
intratiob1[ratio_pix12] = 550;
intratiob2[ratio_pix12] = 600;
intratiob7[ratio_pix12] = 2000;
}
else if (sndwi[ratio_pix12] < 200)
{
slpratiob1[ratio_pix12] = 0;
slpratiob2[ratio_pix12] = 0;
slpratiob7[ratio_pix12] = 0;
intratiob1[ratio_pix12] = ratiob1[ratio_pix12];
intratiob2[ratio_pix12] = ratiob2[ratio_pix12];
intratiob7[ratio_pix12] = ratiob7[ratio_pix12];
}
rb1 = ratiob1[ratio_pix21] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix21] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix21] = 0;
slpratiob2[ratio_pix21] = 0;
slpratiob7[ratio_pix21] = 0;
intratiob1[ratio_pix21] = 550;
intratiob2[ratio_pix21] = 600;
intratiob7[ratio_pix21] = 2000;
}
else if (sndwi[ratio_pix21] < 200)
{
slpratiob1[ratio_pix21] = 0;
slpratiob2[ratio_pix21] = 0;
slpratiob7[ratio_pix21] = 0;
intratiob1[ratio_pix21] = ratiob1[ratio_pix21];
intratiob2[ratio_pix21] = ratiob2[ratio_pix21];
intratiob7[ratio_pix21] = ratiob7[ratio_pix21];
}
rb1 = ratiob1[ratio_pix22] * 0.001; /* vs. / 1000. */
rb2 = ratiob2[ratio_pix22] * 0.001; /* vs. / 1000. */
if (rb2 > 1.0 || rb1 > 1.0 || rb2 < 0.1 || rb1 < 0.1)
{
slpratiob1[ratio_pix22] = 0;
slpratiob2[ratio_pix22] = 0;
slpratiob7[ratio_pix22] = 0;
intratiob1[ratio_pix22] = 550;
intratiob2[ratio_pix22] = 600;
intratiob7[ratio_pix22] = 2000;
}
else if (sndwi[ratio_pix22] < 200)
{
slpratiob1[ratio_pix22] = 0;
slpratiob2[ratio_pix22] = 0;
slpratiob7[ratio_pix22] = 0;
intratiob1[ratio_pix22] = ratiob1[ratio_pix22];
intratiob2[ratio_pix22] = ratiob2[ratio_pix22];
intratiob7[ratio_pix22] = ratiob7[ratio_pix22];
}
/* Interpolate the slope/intercept for each band, and unscale */
slpr11 = slpratiob1[ratio_pix11] * 0.001; /* vs / 1000 */
intr11 = intratiob1[ratio_pix11] * 0.001; /* vs / 1000 */
slpr12 = slpratiob1[ratio_pix12] * 0.001; /* vs / 1000 */
intr12 = intratiob1[ratio_pix12] * 0.001; /* vs / 1000 */
slpr21 = slpratiob1[ratio_pix21] * 0.001; /* vs / 1000 */
intr21 = intratiob1[ratio_pix21] * 0.001; /* vs / 1000 */
slpr22 = slpratiob1[ratio_pix22] * 0.001; /* vs / 1000 */
intr22 = intratiob1[ratio_pix22] * 0.001; /* vs / 1000 */
slprb1 = slpr11 * one_minus_u_x_one_minus_v +
slpr12 * one_minus_u_x_v +
slpr21 * u_x_one_minus_v +
slpr22 * u_x_v;
intrb1 = intr11 * one_minus_u_x_one_minus_v +
intr12 * one_minus_u_x_v +
intr21 * u_x_one_minus_v +
intr22 * u_x_v;
slpr11 = slpratiob2[ratio_pix11] * 0.001; /* vs / 1000 */
intr11 = intratiob2[ratio_pix11] * 0.001; /* vs / 1000 */
slpr12 = slpratiob2[ratio_pix12] * 0.001; /* vs / 1000 */
intr12 = intratiob2[ratio_pix12] * 0.001; /* vs / 1000 */
slpr21 = slpratiob2[ratio_pix21] * 0.001; /* vs / 1000 */
intr21 = intratiob2[ratio_pix21] * 0.001; /* vs / 1000 */
slpr22 = slpratiob2[ratio_pix22] * 0.001; /* vs / 1000 */
intr22 = intratiob2[ratio_pix22] * 0.001; /* vs / 1000 */
slprb2 = slpr11 * one_minus_u_x_one_minus_v +
slpr12 * one_minus_u_x_v +
slpr21 * u_x_one_minus_v +
slpr22 * u_x_v;
intrb2 = intr11 * one_minus_u_x_one_minus_v +
intr12 * one_minus_u_x_v +
intr21 * u_x_one_minus_v +
intr22 * u_x_v;
slpr11 = slpratiob7[ratio_pix11] * 0.001; /* vs / 1000 */
intr11 = intratiob7[ratio_pix11] * 0.001; /* vs / 1000 */
slpr12 = slpratiob7[ratio_pix12] * 0.001; /* vs / 1000 */
intr12 = intratiob7[ratio_pix12] * 0.001; /* vs / 1000 */
slpr21 = slpratiob7[ratio_pix21] * 0.001; /* vs / 1000 */
intr21 = intratiob7[ratio_pix21] * 0.001; /* vs / 1000 */
slpr22 = slpratiob7[ratio_pix22] * 0.001; /* vs / 1000 */
intr22 = intratiob7[ratio_pix22] * 0.001; /* vs / 1000 */
slprb7 = slpr11 * one_minus_u_x_one_minus_v +
slpr12 * one_minus_u_x_v +
slpr21 * u_x_one_minus_v +
slpr22 * u_x_v;
intrb7 = intr11 * one_minus_u_x_one_minus_v +
intr12 * one_minus_u_x_v +
intr21 * u_x_one_minus_v +
intr22 * u_x_v;
/* Calculate NDWI variables for the band ratios */
xndwi = ((double) sband[SRS_BAND8A][curr_pix] -
(double) (sband[SRS_BAND12][curr_pix] * 0.5)) /
((double) sband[SRS_BAND8A][curr_pix] +
(double) (sband[SRS_BAND12][curr_pix] * 0.5));
ndwi_th1 = (andwi[ratio_pix11] + 2.0 *
sndwi[ratio_pix11]) * 0.001;
ndwi_th2 = (andwi[ratio_pix11] - 2.0 *
sndwi[ratio_pix11]) * 0.001;
if (xndwi > ndwi_th1)
xndwi = ndwi_th1;
if (xndwi < ndwi_th2)
xndwi = ndwi_th2;
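/* The NDWI above uses band 8a (NIR) against half of band 12 (SWIR),
and is clamped to within two standard deviations of the climatological
mean for this CMG cell (andwi/sndwi are scaled by 1000) before being
used to predict the band ratios */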
/* Initialize the band ratios */
for (ib = 0; ib < NSRS_BANDS; ib++)
{
erelc[ib] = -1.0;
troatm[ib] = 0.0;
}
/* Compute the band ratio - coastal aerosol, blue, red, SWIR */
erelc[DNS_BAND1] = (xndwi * slprb1 + intrb1);
erelc[DNS_BAND2] = (xndwi * slprb2 + intrb2);
erelc[DNS_BAND4] = 1.0;
erelc[DNS_BAND12] = (xndwi * slprb7 + intrb7);
/* Retrieve the TOA reflectance values for the current pixel; use
a NxN average */
pix_count = 0;
ew_line = i+SAERO_WINDOW;
for (iline = i; iline < ew_line; iline++)
{
if (iline >= nlines) continue;
ew_samp = j+SAERO_WINDOW;
for (isamp = j; isamp < ew_samp; isamp++)
{
if (isamp >= nsamps) continue;
curr_win_pix = iline * nsamps + isamp;
if (level1_qa_is_fill (qaband[curr_win_pix])) continue;
troatm[DNS_BAND1] += toaband[DNS_BAND1][curr_win_pix];
troatm[DNS_BAND2] += toaband[DNS_BAND2][curr_win_pix];
troatm[DNS_BAND4] += toaband[DNS_BAND4][curr_win_pix];
troatm[DNS_BAND12] += toaband[DNS_BAND12][curr_win_pix];
pix_count++;
}
}
troatm[DNS_BAND1] /= pix_count;
troatm[DNS_BAND2] /= pix_count;
troatm[DNS_BAND4] /= pix_count;
troatm[DNS_BAND12] /= pix_count;
/* Retrieve the aerosol information for low eps 1.0 */
iband1 = DNS_BAND4; /* red band */
iaots = 0;
if (use_orig_aero)
{
pres = tp[curr_pix];
uoz = tozi[curr_pix];
uwv = twvi[curr_pix];
retval = subaeroret (input->meta.sat, false, iband1, xts, xtv,
xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc, troatm,
tpres, rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin,
sphalbt, normext, tsmax, tsmin, nbfic, nbfi, tts, indts,
ttv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, &raot, &residual, &iaots, eps1);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc, troatm,
tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef, satm_coef,
normext_p0a3_arr, &raot, &residual, &iaots, eps1);
/* Save the data */
residual1 = residual;
/* Retrieve the aerosol information for moderate eps 1.75 */
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, false, iband1, xts, xtv,
xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc, troatm,
tpres, rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin,
sphalbt, normext, tsmax, tsmin, nbfic, nbfi, tts, indts,
ttv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, &raot, &residual, &iaots, eps2);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc, troatm,
tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef, satm_coef,
normext_p0a3_arr, &raot, &residual, &iaots, eps2);
/* Save the data */
residual2 = residual;
/* Retrieve the aerosol information for high eps 2.5 */
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, false, iband1, xts, xtv,
xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc, troatm,
tpres, rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin,
sphalbt, normext, tsmax, tsmin, nbfic, nbfi, tts, indts,
ttv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, &raot, &residual, &iaots, eps3);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc, troatm,
tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef, satm_coef,
normext_p0a3_arr, &raot, &residual, &iaots, eps3);
/* Save the data */
residual3 = residual;
/* Find the eps (angstrom coefficient for AOT) that minimizes the
residual */
xc = residual1 - residual3;
xf = residual2 - residual3;
coefa = (xc*xe - xb*xf) / (xa*xe - xb*xd);
coefb = (xa*xf - xc*xd) / (xa*xe - xb*xd);
/* Local extremum */
epsmin = -coefb / (2.0 * coefa);
resepsmin = xa*epsmin*epsmin + xb*epsmin + xc;
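/* If the vertex of the fitted parabola falls outside [LOW_EPS,
HIGH_EPS], or its residual is not below both sampled endpoint
residuals (a downward-opening fit), fall back to whichever endpoint
eps produced the smaller residual */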
if ((epsmin < LOW_EPS) || (epsmin > HIGH_EPS))
{
if (residual1 < residual3)
epsmin = eps1;
else
epsmin = eps3;
}
else
{
if ((resepsmin > residual1) || (resepsmin > residual3))
{
if (residual1 < residual3)
epsmin = eps1;
else
epsmin = eps3;
}
}
eps = epsmin;
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, false, iband1, xts,
xtv, xmus, xmuv, xfi, cosxfi, pres, uoz, uwv, erelc,
troatm, tpres, rolutt, transt, xtsstep, xtsmin,
xtvstep, xtvmin, sphalbt, normext, tsmax, tsmin, nbfic,
nbfi, tts, indts, ttv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, &raot,
&residual, &iaots, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, false, iband1, erelc, troatm,
tgo_arr, roatm_iaMax, roatm_coef, ttatmg_coef, satm_coef,
normext_p0a3_arr, &raot, &residual, &iaots, eps);
teps[curr_pix] = eps;
taero[curr_pix] = raot;
corf = raot / xmus;
/* Check the model residual. Corf represents aerosol impact.
Test the quality of the aerosol inversion. */
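/* The acceptance threshold loosens with corf (i.e. with aerosol
loading) and with the SWIR TOA signal, presumably because both
naturally inflate the model residual */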
if (residual < (0.015 + 0.005 * corf + 0.10 * troatm[DNS_BAND12]))
{
/* Test if NIR band 8a makes sense. Use a NxN window average. */
iband = DNS_BAND8A;
rotoa = 0.0;
pix_count = 0;
ew_line = i+SAERO_WINDOW;
for (iline = i; iline < ew_line; iline++)
{
if (iline >= nlines) continue;
curr_win_pix = iline * nsamps + j;
ew_samp = j+SAERO_WINDOW;
for (isamp = j; isamp < ew_samp; isamp++, curr_win_pix++)
{
if (isamp >= nsamps) continue;
rotoa += toaband[iband][curr_win_pix];
pix_count++;
}
}
rotoa /= pix_count;
raot550nm = raot;
if (use_orig_aero)
{
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus,
xmuv, xfi, cosxfi, raot550nm, iband, pres, tpres,
aot550nm, rolutt, transt, xtsstep, xtsmin, xtvstep,
xtvmin, sphalbt, normext, tsmax, tsmin, nbfic, nbfi,
tts, indts, ttv, uoz, uwv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, rotoa,
&roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[iband],
aot550nm[roatm_iaMax[iband]], &roatm_coef[iband][0],
&ttatmg_coef[iband][0], &satm_coef[iband][0], raot550nm,
iband, normext_p0a3_arr[iband], rotoa, &roslamb, eps);
ros5 = roslamb;
/* Test if red band 4 makes sense. Use a NxN window average. */
iband = DNS_BAND4;
rotoa = 0.0;
pix_count = 0;
ew_line = i+SAERO_WINDOW;
for (iline = i; iline < ew_line; iline++)
{
if (iline >= nlines) continue;
curr_win_pix = iline * nsamps + j;
ew_samp = j+SAERO_WINDOW;
for (isamp = j; isamp < ew_samp; isamp++, curr_win_pix++)
{
if (isamp >= nsamps) continue;
rotoa += toaband[iband][curr_win_pix];
pix_count++;
}
}
rotoa /= pix_count;
raot550nm = raot;
if (use_orig_aero)
{
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus,
xmuv, xfi, cosxfi, raot550nm, iband, pres, tpres,
aot550nm, rolutt, transt, xtsstep, xtsmin, xtvstep,
xtvmin, sphalbt, normext, tsmax, tsmin, nbfic, nbfi,
tts, indts, ttv, uoz, uwv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, rotoa,
&roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[iband],
aot550nm[roatm_iaMax[iband]], &roatm_coef[iband][0],
&ttatmg_coef[iband][0], &satm_coef[iband][0], raot550nm,
iband, normext_p0a3_arr[iband], rotoa, &roslamb, eps);
ros4 = roslamb;
/* Use the NDVI to validate the reflectance values or flag
as water */
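/* NDVI = (ros5 - ros4) / (ros5 + ros4) with ros5 = NIR band 8a and
ros4 = red band 4; a positive NDVI with a sufficiently bright NIR
(> 0.1) is treated as clear land with a valid retrieval */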
if ((ros5 > 0.1) && ((ros5 - ros4) / (ros5 + ros4) > 0))
{
/* Clear pixel with valid aerosol retrieval */
ipflag[curr_pix] |= (1 << IPFLAG_VALID);
}
else
{
/* Flag as water */
ipflag[curr_pix] = (1 << IPFLAG_WATER);
}
}
else
{
/* Flag as water */
ipflag[curr_pix] = (1 << IPFLAG_WATER);
}
/* Retest any water pixels to verify they are water and obtain
their aerosol */
if (lasrc_qa_is_water(ipflag[curr_pix]))
{
/* Initialize the band ratios */
for (ib = 0; ib < NSR_BANDS; ib++)
{
erelc[ib] = -1.0;
troatm[ib] = 0.0;
}
/* Retrieve the TOA reflectance values for the current pixel;
use a NxN average */
pix_count = 0;
ew_line = i+SAERO_WINDOW;
for (iline = i; iline < ew_line; iline++)
{
if (iline >= nlines) continue;
curr_win_pix = iline * nsamps + j;
ew_samp = j+SAERO_WINDOW;
for (isamp = j; isamp < ew_samp; isamp++, curr_win_pix++)
{
if (isamp >= nsamps) continue;
if (level1_qa_is_fill (qaband[curr_win_pix])) continue;
troatm[DNS_BAND1] +=
toaband[DNS_BAND1][curr_win_pix];
troatm[DNS_BAND4] +=
toaband[DNS_BAND4][curr_win_pix];
troatm[DNS_BAND8A] +=
toaband[DNS_BAND8A][curr_win_pix];
troatm[DNS_BAND12] +=
toaband[DNS_BAND12][curr_win_pix];
pix_count++;
}
}
troatm[DNS_BAND1] /= pix_count;
troatm[DNS_BAND4] /= pix_count;
troatm[DNS_BAND8A] /= pix_count;
troatm[DNS_BAND12] /= pix_count;
/* Set the band ratio - coastal aerosol, red, NIR, SWIR */
erelc[DNS_BAND1] = 1.0;
erelc[DNS_BAND4] = 1.0;
erelc[DNS_BAND8A] = 1.0;
erelc[DNS_BAND12] = 1.0;
/* Retrieve the water aerosol information for eps 1.5 */
eps = WATER_EPS;
iaots = 0;
if (use_orig_aero)
{
retval = subaeroret (input->meta.sat, true /*water*/,
iband1, xts, xtv, xmus, xmuv, xfi, cosxfi, pres, uoz,
uwv, erelc, troatm, tpres, rolutt, transt, xtsstep,
xtsmin, xtvstep, xtvmin, sphalbt, normext, tsmax,
tsmin, nbfic, nbfi, tts, indts, ttv, tauray, ogtransa1,
ogtransb0, ogtransb1, wvtransa, wvtransb, oztransa,
&raot, &residual, &iaots, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing aerosol retrieval.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
subaeroret_new (input->meta.sat, true /*water*/, iband1,
erelc, troatm, tgo_arr, roatm_iaMax, roatm_coef,
ttatmg_coef, satm_coef, normext_p0a3_arr, &raot,
&residual, &iaots, eps);
teps[curr_pix] = eps;
taero[curr_pix] = raot;
corf = raot / xmus;
/* Test band 1 reflectance to eliminate negative */
iband = DNS_BAND1;
rotoa = troatm[DNS_BAND1];
raot550nm = raot;
if (use_orig_aero)
{
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus,
xmuv, xfi, cosxfi, raot550nm, iband, pres, tpres,
aot550nm, rolutt, transt, xtsstep, xtsmin, xtvstep,
xtvmin, sphalbt, normext, tsmax, tsmin, nbfic, nbfi,
tts, indts, ttv, uoz, uwv, tauray, ogtransa1, ogtransb0,
ogtransb1, wvtransa, wvtransb, oztransa, rotoa,
&roslamb, &tgo, &roatm, &ttatmg, &satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[iband],
aot550nm[roatm_iaMax[iband]], &roatm_coef[iband][0],
&ttatmg_coef[iband][0], &satm_coef[iband][0], raot550nm,
iband, normext_p0a3_arr[iband], rotoa, &roslamb, eps);
ros1 = roslamb;
if (residual > (0.010 + 0.005 * corf) || ros1 < 0)
{
/* Not a valid water pixel (possibly urban). Clear all
the QA bits, and mark it as IPFLAG_FAILED. */
ipflag[curr_pix] = (1 << IPFLAG_FAILED);
}
else
{
/* Valid water pixel */
ipflag[curr_pix] = (1 << IPFLAG_WATER);
ipflag[curr_pix] |= (1 << IPFLAG_VALID);
}
} /* if water pixel */
/* Fill in the remaining taero and teps values for the window,
using the current pixel. Skip fill pixels. */
for (iline = i; iline < i+SAERO_WINDOW; iline++)
{
if (iline >= nlines) continue;
curr_win_pix = iline * nsamps + j;
for (isamp = j; isamp < j+SAERO_WINDOW;
isamp++, curr_win_pix++)
{
if (isamp >= nsamps) continue;
if (level1_qa_is_fill (qaband[curr_win_pix])) continue;
teps[curr_win_pix] = teps[curr_pix];
taero[curr_win_pix] = taero[curr_pix];
}
}
} /* end for j */
} /* end for i */
/* end aerosol inversion for the NxN window */
#ifndef _OPENMP
/* update status */
printf ("100%%\n");
fflush (stdout);
#endif
/* Done with the ratiob* arrays */
free (andwi); andwi = NULL;
free (sndwi); sndwi = NULL;
free (ratiob1); ratiob1 = NULL;
free (ratiob2); ratiob2 = NULL;
free (ratiob7); ratiob7 = NULL;
free (intratiob1); intratiob1 = NULL;
free (intratiob2); intratiob2 = NULL;
free (intratiob7); intratiob7 = NULL;
free (slpratiob1); slpratiob1 = NULL;
free (slpratiob2); slpratiob2 = NULL;
free (slpratiob7); slpratiob7 = NULL;
/* Done with the DEM, water vapor, and ozone arrays */
free (dem); dem = NULL;
free (wv); wv = NULL;
free (oz); oz = NULL;
#ifdef WRITE_TAERO
/* Write the ipflag values for comparison with other algorithms */
aero_fptr = fopen ("ipflag.img", "w");
fwrite (ipflag, npixels, sizeof (uint8), aero_fptr);
fclose (aero_fptr);
/* Write the aerosol values for comparison with other algorithms */
aero_fptr = fopen ("aerosols.img", "w");
fwrite (taero, npixels, sizeof (float), aero_fptr);
fclose (aero_fptr);
#endif
/* Use the UL corner of the aerosol windows to interpolate the remaining
aerosol pixels in the window, including the UL corner of the window */
mytime = time(NULL);
printf ("Interpolating the aerosol values in the 6x6 windows %s\n",
ctime(&mytime)); fflush(stdout);
aerosol_interp_sentinel (SAERO_WINDOW, qaband, ipflag, taero, nlines,
nsamps);
#ifdef WRITE_TAERO
/* Write the ipflag values for comparison with other algorithms */
aero_fptr = fopen ("ipflag2.img", "w");
fwrite (ipflag, npixels, sizeof (uint8), aero_fptr);
fclose (aero_fptr);
/* Write the aerosol values for comparison with other algorithms */
aero_fptr = fopen ("aerosols2.img", "w");
fwrite (taero, npixels, sizeof (float), aero_fptr);
fclose (aero_fptr);
#endif
/* Expand the area around failed pixels to smooth aerosols in the area */
mytime = time(NULL);
printf ("Expand the failed pixels %s\n", ctime(&mytime));
ipflag_expand_failed_sentinel (ipflag, nlines, nsamps);
#ifdef WRITE_TAERO
/* Write the ipflag values for comparison with other algorithms */
aero_fptr = fopen ("ipflag3.img", "w");
fwrite (ipflag, npixels, sizeof (uint8), aero_fptr);
fclose (aero_fptr);
/* Write the aerosol values for comparison with other algorithms */
aero_fptr = fopen ("aerosols3.img", "w");
fwrite (taero, npixels, sizeof (float), aero_fptr);
fclose (aero_fptr);
#endif
/* Fill in the failed pixels with an average of the clear surrounding
window pixels */
mytime = time(NULL);
printf ("Averaging the failed pixels %s\n", ctime(&mytime)); fflush(stdout);
aero_avg_failed_sentinel (qaband, ipflag, taero, teps, nlines, nsamps);
#ifdef WRITE_TAERO
/* Write the ipflag values for comparison with other algorithms */
aero_fptr = fopen ("ipflag4.img", "w");
fwrite (ipflag, nlines*nsamps, sizeof (uint8), aero_fptr);
fclose (aero_fptr);
/* Write the aerosol values for comparison with other algorithms */
aero_fptr = fopen ("aerosols4.img", "w");
fwrite (taero, nlines*nsamps, sizeof (float), aero_fptr);
fclose (aero_fptr);
#endif
/* Perform the second level of atmospheric correction using the aerosols */
mytime = time(NULL);
printf ("Performing atmospheric correction ... %s\n", ctime(&mytime));
/* Loop through all the bands */
for (ib = 0; ib <= DNS_BAND12; ib++)
{
printf (" Band %s\n", SENTINEL_BANDNAME[ib]); fflush(stdout);
#ifdef PROC_ALL_BANDS
/* Special handling of band 10 if turned on */
if (ib == DNS_BAND10)
{ /* Band 10 - just use the TOA values */
printf (" -- Band 10 so just use the TOA values\n");
for (i = 0; i < npixels; i++)
sband[ib][i] = toaband[ib][i];
/* Skip to the next band */
continue;
}
#endif
/* Process the remaining bands normally */
#ifdef _OPENMP
#pragma omp parallel for private (i, rsurf, rotoa, raot550nm, eps, pres, uwv, uoz, retval, tmpf, roslamb, tgo, roatm, ttatmg, satm, xrorayp)
#endif
for (i = 0; i < npixels; i++)
{
/* If this pixel is fill, then don't process */
if (level1_qa_is_fill (qaband[i]))
continue;
/* Correct all pixels */
rotoa = toaband[ib][i];
raot550nm = taero[i];
eps = teps[i];
if (use_orig_aero)
{
pres = tp[i];
uwv = twvi[i];
uoz = tozi[i];
retval = atmcorlamb2 (input->meta.sat, xts, xtv, xmus, xmuv,
xfi, cosxfi, raot550nm, ib, pres, tpres, aot550nm,
rolutt, transt, xtsstep, xtsmin, xtvstep, xtvmin, sphalbt,
normext, tsmax, tsmin, nbfic, nbfi, tts, indts, ttv, uoz,
uwv, tauray, ogtransa1, ogtransb0, ogtransb1, wvtransa,
wvtransb, oztransa, rotoa, &roslamb, &tgo, &roatm, &ttatmg,
&satm, &xrorayp, eps);
if (retval != SUCCESS)
{
sprintf (errmsg, "Performing lambertian "
"atmospheric correction type 2.");
error_handler (true, FUNC_NAME, errmsg);
exit (ERROR);
}
}
else
atmcorlamb2_new (input->meta.sat, tgo_arr[ib],
aot550nm[roatm_iaMax[ib]], &roatm_coef[ib][0],
&ttatmg_coef[ib][0], &satm_coef[ib][0], raot550nm, ib,
normext_p0a3_arr[ib], rotoa, &roslamb, eps);
/* If this is the coastal aerosol band then set the aerosol
bits in the QA band */
if (ib == DNS_BAND1)
{
/* Set up aerosol QA bits */
rsurf = sband[ib][i];
tmpf = fabs (rsurf - roslamb);
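/* rsurf still holds the first-pass climatology-based correction for
this band; its difference from the aerosol-corrected roslamb measures
the aerosol impact and drives the two aerosol QA bits */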
if (tmpf <= LOW_AERO_THRESH)
{ /* Set first aerosol bit (low aerosols) */
ipflag[i] |= (1 << AERO1_QA);
}
else
{
if (tmpf < AVG_AERO_THRESH)
{ /* Set second aerosol bit (average aerosols) */
ipflag[i] |= (1 << AERO2_QA);
}
else
{ /* Set both aerosol bits (high aerosols) */
ipflag[i] |= (1 << AERO1_QA);
ipflag[i] |= (1 << AERO2_QA);
}
}
} /* end if this is the coastal aerosol band */
/* Save the unscaled surface reflectance value */
if (roslamb < MIN_VALID_REFL)
sband[ib][i] = MIN_VALID_REFL;
else if (roslamb > MAX_VALID_REFL)
sband[ib][i] = MAX_VALID_REFL;
else
sband[ib][i] = roslamb;
} /* end for i */
} /* end for ib */
/* Free memory for arrays no longer needed */
if (use_orig_aero)
{
free (twvi);
free (tozi);
free (tp);
}
free (taero);
free (teps);
/* Write the data to the output file */
mytime = time(NULL);
printf ("Writing surface reflectance corrected data to the output "
"files ... %s", ctime(&mytime)); fflush(stdout);
/* Open the output file */
sr_output = open_output (xml_metadata, input, OUTPUT_SR);
if (sr_output == NULL)
{ /* error message already printed */
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Loop through the reflectance bands and write the data */
for (ib = 0; ib <= DNS_BAND12; ib++)
{
/* Scale the output data from float to int16 */
printf (" Band %s: %s\n", SENTINEL_BANDNAME[ib],
sr_output->metadata.band[ib].file_name);
convert_output (sband, ib, nlines, nsamps, false, out_band);
/* Write the scaled product */
if (put_output_lines (sr_output, out_band, ib, 0, nlines,
sizeof (uint16)) != SUCCESS)
{
sprintf (errmsg, "Writing output data for band %d", ib);
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Create the ENVI header file for this band */
if (create_envi_struct (&sr_output->metadata.band[ib],
&xml_metadata->global, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Creating ENVI header structure.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Write the ENVI header */
strcpy (envi_file, sr_output->metadata.band[ib].file_name);
cptr = strchr (envi_file, '.');
strcpy (cptr, ".hdr");
if (write_envi_hdr (envi_file, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Writing ENVI header file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
}
/* Append the surface reflectance bands to the XML file */
if (append_metadata (NREFLS_BANDS, sr_output->metadata.band, xml_infile)
!= SUCCESS)
{
sprintf (errmsg, "Appending surface reflectance bands to the "
"XML file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Write the aerosol QA band */
printf (" Aerosol Band %d: %s\n", SRS_AEROSOL+1,
sr_output->metadata.band[SRS_AEROSOL].file_name);
if (put_output_lines (sr_output, ipflag, SRS_AEROSOL, 0, nlines,
sizeof (uint8)) != SUCCESS)
{
sprintf (errmsg, "Writing aerosol QA output data");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Free memory for ipflag data */
free (ipflag);
/* Create the ENVI header for the aerosol QA band */
if (create_envi_struct (&sr_output->metadata.band[SRS_AEROSOL],
&xml_metadata->global, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Creating ENVI header structure.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Write the ENVI header */
strcpy (envi_file, sr_output->metadata.band[SRS_AEROSOL].file_name);
cptr = strchr (envi_file, '.');
strcpy (cptr, ".hdr");
if (write_envi_hdr (envi_file, &envi_hdr) != SUCCESS)
{
sprintf (errmsg, "Writing ENVI header file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Append the aerosol QA band to the XML file */
if (append_metadata (1, &sr_output->metadata.band[SRS_AEROSOL],
xml_infile) != SUCCESS)
{
sprintf (errmsg, "Appending aerosol QA band to XML file.");
error_handler (true, FUNC_NAME, errmsg);
return (ERROR);
}
/* Close the output surface reflectance products */
close_output (sat, sr_output, OUTPUT_SR);
free_output (sr_output, OUTPUT_SR);
/* Free the spatial mapping pointer */
free (space);
/* Free the data arrays */
free (rolutt);
free (transt);
free (sphalbt);
free (normext);
free (tsmax);
free (tsmin);
free (nbfic);
free (nbfi);
free (ttv);
/* Successful completion */
mytime = time(NULL);
printf ("Surface reflectance correction complete ... %s\n", ctime(&mytime));
return (SUCCESS);
}
|
uts_omp.c | /******************************************************
* Unbalanced Tree Search v2.1 *
* Based on the implementation available at *
* http://sourceforge.net/projects/uts-benchmark *
******************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h" /* for _GNU_SOURCE */
#endif
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h> /* for INT_MAX */
#include <math.h> /* for floor, log, sin */
#include <omp.h>
#include <qthread/qthread.h>
#include <qthread/qtimer.h>
#define SILENT_ARGPARSING
#include "argparsing.h"
#include "log.h"
#define BRG_RNG // Select RNG
#include "../../utils/rng/rng.h"
#define PRINT_STATS 1
#define MAXNUMCHILDREN 100
static size_t nodecount;
typedef enum {
BIN = 0,
GEO,
HYBRID,
BALANCED
} tree_t;
static char *type_names[] = {
"Binomial",
"Geometric",
"Hybrid",
"Balanced"
};
typedef enum {
LINEAR = 0,
EXPDEC,
CYCLIC,
FIXED
} shape_t;
static char *shape_names[] = {
"Linear decrease",
"Exponential decrease",
"Cyclic",
"Fixed branching factor"
};
typedef struct {
int height; // Depth of node in the tree
struct state_t state; // Local RNG state
int num_children;
} node_t;
// Default values
static tree_t tree_type = GEO;
static double bf_0 = 4.0;
static int root_seed = 0;
static int num_samples = 1;
static int tree_depth = 6;
static shape_t shape_fn = LINEAR;
static int non_leaf_bf = 4;
static double non_leaf_prob = 15.0 / 64.0;
static double shift_depth = 0.5;
// Tree metrics
static uint64_t tree_height = 0;
static uint64_t num_leaves = 0;
static double normalize(int n)
{
if (n < 0) {
printf("*** toProb: rand n = %d out of range\n", n);
}
return ((n < 0) ? 0.0 : ((double)n) / (double)INT_MAX);
}
static int calc_num_children_bin(node_t *parent)
{
int v = rng_rand(parent->state.state);
double d = normalize(v);
return (d < non_leaf_prob) ? non_leaf_bf : 0;
}
static int calc_num_children(node_t *parent)
{
int num_children = 0;
    if (parent->height == 0) {
        num_children = (int)floor(bf_0);
    } else {
        num_children = calc_num_children_bin(parent);
    }
if (parent->height == 0) {
int root_bf = (int)ceil(bf_0);
if (num_children > root_bf) {
printf("*** Number of children truncated from %d to %d\n",
num_children, root_bf);
num_children = root_bf;
}
} else {
if (num_children > MAXNUMCHILDREN) {
printf("*** Number of children truncated from %d to %d\n",
num_children, MAXNUMCHILDREN);
num_children = MAXNUMCHILDREN;
}
}
return num_children;
}
#define BIG_STACKS
// Notes:
// - Each task receives a distinct copy of the parent
// - The copy of a child is shallow; be careful with the `state` member
static long visit(node_t *parent,
int num_children)
{
uint64_t num_descendants = 1;
#ifdef BIG_STACKS
/* VLA length must be >= 1 in C; leaves pass num_children == 0 */
uint64_t child_descendants[num_children > 0 ? num_children : 1];
node_t child_nodes[num_children > 0 ? num_children : 1];
#else
uint64_t *child_descendants;
node_t *child_nodes;
if (num_children > 0) {
child_descendants = calloc(num_children, sizeof(uint64_t));
child_nodes = malloc(sizeof(node_t) * num_children);
}
#endif
// Spawn children, if any
for (int i = 0; i < num_children; i++) {
node_t *child = &child_nodes[i];
child->height = parent->height + 1;
for (int j = 0; j < num_samples; j++) {
rng_spawn(parent->state.state, child->state.state, i);
}
child->num_children = calc_num_children(child);
#pragma omp task untied firstprivate(i, child) shared(child_descendants)
child_descendants[i] = visit(child, child->num_children);
}
#pragma omp taskwait
// #pragma omp parallel for reduction(+:num_descendants)
for (int i = 0; i < num_children; i++) {
num_descendants += child_descendants[i];
}
#ifndef BIG_STACKS
if (num_children > 0) {
free(child_descendants);
free(child_nodes);
}
#endif
return num_descendants;
}
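/* Minimal standalone sketch (not part of UTS) of the task/taskwait pattern
 * used by visit(): each child subtree is explored in its own untied task and
 * the parent blocks at taskwait before reducing, which also keeps the
 * parent-stack array alive while children write into it. Counts the nodes of
 * a complete tree; must be called from within a parallel region, like
 * visit(). Disabled so it does not affect the build. */
#if 0
static long count_nodes(int depth, int fanout)
{
    long total = 1;               /* count this node */
    if (depth == 0) { return total; }
    long sub[fanout];             /* one slot per child, like child_descendants */
    for (int i = 0; i < fanout; i++) {
#pragma omp task untied firstprivate(i) shared(sub)
        sub[i] = count_nodes(depth - 1, fanout);
    }
#pragma omp taskwait              /* all children done before reading sub[] */
    for (int i = 0; i < fanout; i++) {
        total += sub[i];
    }
    return total;
}
#endif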
#ifdef PRINT_STATS
static void print_stats(void)
{
LOG_UTS_PARAMS_YAML()
fflush(stdout);
}
#else /* ifdef PRINT_STATS */
static void print_banner(void)
{
printf("UTS - Unbalanced Tree Search 2.1 (C/Qthreads)\n");
printf("Tree type:%3d (%s)\n", tree_type, type_names[tree_type]);
printf("Tree shape parameters:\n");
printf(" root branching factor b_0 = %.1f, root seed = %d\n",
bf_0, root_seed);
if ((tree_type == GEO) || (tree_type == HYBRID)) {
printf(" GEO parameters: gen_mx = %d, shape function = %d (%s)\n",
tree_depth, shape_fn, shape_names[shape_fn]);
}
if ((tree_type == BIN) || (tree_type == HYBRID)) {
double q = non_leaf_prob;
int m = non_leaf_bf;
double es = (1.0 / (1.0 - q * m));
printf(" BIN parameters: q = %f, m = %d, E(n) = %f, E(s) = %.2f\n",
q, m, q * m, es);
}
if (tree_type == HYBRID) {
printf(" HYBRID: GEO from root to depth %d, then BIN\n",
(int)ceil(shift_depth * tree_depth));
}
if (tree_type == BALANCED) {
printf(" BALANCED parameters: gen_mx = %d\n", tree_depth);
printf(" Expected size: %llu nodes, %llu leaves\n",
(unsigned long long)((pow(bf_0, tree_depth + 1) - 1.0) / (bf_0 - 1.0)),
(unsigned long long)pow(bf_0, tree_depth));
}
printf("Random number generator: ");
printf("SHA-1 (state size = %ldB)\n", sizeof(struct state_t));
printf("Compute granularity: %d\n", num_samples);
printf("Execution strategy:\n");
printf(" Workers: %d\n", omp_get_num_threads());
printf("\n");
fflush(stdout);
}
#endif /* ifdef PRINT_STATS */
int main(int argc,
char *argv[])
{
uint64_t total_num_nodes = 0;
qtimer_t timer;
double total_time = 0.0;
int threads = 1;
CHECK_VERBOSE();
{
unsigned int tmp = (unsigned int)tree_type;
NUMARG(tmp, "UTS_TREE_TYPE");
if (tmp <= BALANCED) {
tree_type = (tree_t)tmp;
} else {
fprintf(stderr, "invalid tree type\n");
return EXIT_FAILURE;
}
tmp = (unsigned int)shape_fn;
NUMARG(tmp, "UTS_SHAPE_FN");
if (tmp <= FIXED) {
shape_fn = (shape_t)tmp;
} else {
fprintf(stderr, "invalid shape function\n");
return EXIT_FAILURE;
}
}
DBLARG(bf_0, "UTS_BF_0");
NUMARG(root_seed, "UTS_ROOT_SEED");
NUMARG(tree_depth, "UTS_TREE_DEPTH");
DBLARG(non_leaf_prob, "UTS_NON_LEAF_PROB");
NUMARG(non_leaf_bf, "UTS_NON_LEAF_NUM");
DBLARG(shift_depth, "UTS_SHIFT_DEPTH");
NUMARG(num_samples, "UTS_NUM_SAMPLES");
#pragma omp parallel
#pragma omp single
{
#ifdef PRINT_STATS
print_stats();
#else
print_banner();
#endif
threads = omp_get_num_threads();
}
timer = qtimer_create();
qtimer_start(timer);
node_t root;
root.height = 0;
rng_init(root.state.state, root_seed);
root.num_children = calc_num_children(&root);
nodecount = 1;
long retval;
#pragma omp parallel
#pragma omp single nowait
#pragma omp task untied
retval = visit(&root, root.num_children);
total_num_nodes = retval;
qtimer_stop(timer);
total_time = qtimer_secs(timer);
qtimer_destroy(timer);
#ifdef PRINT_STATS
LOG_UTS_RESULTS_YAML(total_num_nodes, total_time)
LOG_ENV_OMP_YAML(threads)
#else
printf("Tree size = %lu, tree depth = %d, num leaves = %llu (%.2f%%)\n",
(unsigned long)total_num_nodes,
(int)tree_height,
(unsigned long long)num_leaves,
num_leaves / (float)total_num_nodes * 100.0);
printf("Wallclock time = %.3f sec, performance = %.0f "
"nodes/sec (%.0f nodes/sec per PE)\n\n",
total_time,
total_num_nodes / total_time,
total_num_nodes / total_time / omp_get_num_threads());
#endif /* ifdef PRINT_STATS */
return 0;
}
/* vim:set expandtab */
|
fixed_version.c | #include <stdio.h>
int main(){
int sum = 0;
int temp[100];
int DATA_MAG = 100;
int H[100];
int scale_factor = 10;
int i;
int LUT[100];
#pragma omp parallel default (none) private(i) shared (temp, LUT, H, scale_factor, sum, DATA_MAG)
{
#pragma omp for
for (i = 0; i < DATA_MAG; i++) {
H[i] = i;
temp[i] = 0;
}
#pragma omp single
for (i = 0; i < DATA_MAG; i++)
{
sum += H[i];
temp[i] = sum;
}
#pragma omp for
for (i = 0; i < DATA_MAG; i++){
LUT[i] = temp[i] * scale_factor;
}
}
for (i = 0; i < 100; i++) {
printf("%d\n",LUT[i]);
}
return 0;
}
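/* Optional sketch (not part of the fix above): the scan inside "#pragma omp
 * single" is serial because each temp[i] depends on temp[i-1]. A classic
 * two-pass block-wise prefix sum removes that bottleneck; hypothetical
 * alternative, disabled by default. */
#if 0
#include <omp.h>
static void prefix_sum(const int *in, int *out, int n)
{
    int nthreads = 1;
    int offsets[256];                       /* per-thread block sums (<= 256 threads) */
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        #pragma omp single
        nthreads = omp_get_num_threads();   /* implicit barrier publishes nthreads */
        int lo = (int)((long)n * tid / nthreads);
        int hi = (int)((long)n * (tid + 1) / nthreads);
        /* pass 1: scan own block */
        int s = 0;
        for (int i = lo; i < hi; i++) { s += in[i]; out[i] = s; }
        offsets[tid] = s;
        #pragma omp barrier
        /* pass 2: shift by the sums of all preceding blocks */
        int shift = 0;
        for (int t = 0; t < tid; t++) shift += offsets[t];
        for (int i = lo; i < hi; i++) out[i] += shift;
    }
}
#endif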
|
sobel_filter.c | /*
* Description: Implementation of Sobel Filter for detecting
* edges on .pgm images.
* Author: Athanasios Kastoras | University of Thessaly
* Email: akastoras@uth.gr
*/
#include "pgm.h"
#include <stdio.h>
#include <stdlib.h> /* for abs() */
#include <math.h>
#include <omp.h>
#define THRESHOLD(a,max) (((a) > (max)) ? (max) : 0)
/* Implementation of Sobel Operator on pgm_t images */
pgm_t *sobel_filter(const pgm_t *image)
{
/* Variable Declaration */
register int x_sum, y_sum;
const register int height = image->height, width = image->width;
const int maxval = image->maxval;
pgm_t *new_image = new_pgm_image(width, height, image->maxval);
/* Parallelizing using OpenMP */
/* Give an entire row to a single thread to increase cache performance */
#pragma omp parallel for private(x_sum, y_sum) schedule(static, 1)
for (register int x = 1; x < height - 1; ++x) {
/*
* Apply the Sobel Filter's kernel convolution
* on each pixel of a single row.
* Convolution matrices:
* X:
* -1 0 1
* -2 0 2
* -1 0 1
* Y:
* -1 -2 -1
* 0 0 0
* 1 2 1
* Convolve with X to get Gx and with Y to get Gy
* The final pixel value is the Euclidean norm of Gx and Gy
*/
for (register int y = 1; y < width - 1; ++y) {
x_sum = (
image->pixels[(x + 1)*width + (y + 1)] -
image->pixels[(x + 1)*width + (y - 1)] +
(image->pixels[ (x)*width + (y + 1)] << 1) -
(image->pixels[ (x)*width + (y - 1)] << 1) +
image->pixels[(x - 1)*width + (y + 1)] -
image->pixels[(x - 1)*width + (y - 1)]
);
y_sum = (
image->pixels[(x + 1)*width + (y + 1)] +
(image->pixels[(x + 1)*width + (y)] << 1) +
image->pixels[(x + 1)*width + (y - 1)] -
image->pixels[(x - 1)*width + (y + 1)] -
(image->pixels[(x - 1)*width + (y)] << 1) -
image->pixels[(x - 1)*width + (y - 1)]
);
// Manhattan distance is used instead of Euclidean to increase performance
new_image->pixels[x * width + y] = THRESHOLD(abs(x_sum) + abs(y_sum), maxval);
}
}
return new_image;
}
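/* Variant sketch (not used above): the true Euclidean gradient magnitude the
 * kernel comment refers to, kept here for comparison; typically slower than
 * the Manhattan approximation because of sqrt(). Disabled by default. */
#if 0
static int sobel_magnitude_euclidean(int x_sum, int y_sum, int maxval)
{
	int g = (int)sqrt((double)x_sum * x_sum + (double)y_sum * y_sum);
	return THRESHOLD(g, maxval);
}
#endif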
/* Driver program to test sobel_filter function */
int main(int argc, char **argv) {
if (argc != 3) {
printf("Invalid Arguments!\n");
return 1;
}
pgm_t *image = load_pgm_image(argv[1]); // Load an Image
pgm_t *new_image = sobel_filter(image); // Implement sobel_filter()
store_pgm_image(new_image, argv[2]); // Store in a new image
return 0;
}
|
arrays.c | /**
* module with tools for manipulating arrays
* Julien Lesgourgues, 18.04.2010
*/
#include "arrays.h"
/**
* Called by thermodynamics_init(); perturb_sources().
*/
int array_derive(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_dydx,
ErrorMsg errmsg) {
int i;
double dx1,dx2,dy1,dy2,weight1,weight2;
class_test((index_dydx == index_x) || (index_dydx == index_y),
errmsg,
"output column %d must differ from input columns %d and %d",index_dydx,index_x,index_y);
dx2=array[1*n_columns+index_x]-array[0*n_columns+index_x];
dy2=array[1*n_columns+index_y]-array[0*n_columns+index_y];
for (i=1; i<n_lines-1; i++) {
dx1 = dx2;
dy1 = dy2;
dx2 = array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x];
dy2 = array[(i+1)*n_columns+index_y]-array[i*n_columns+index_y];
class_test((dx1 == 0) || (dx2 == 0),
errmsg,
"stop to avoid division by zero");
weight1 = dx2*dx2;
weight2 = dx1*dx1;
array[i*n_columns+index_dydx] = (weight1*dy1+weight2*dy2) / (weight1*dx1+weight2*dx2);
if (i == 1)
array[(i-1)*n_columns+index_dydx] = 2.*dy1/dx1 - array[i*n_columns+index_dydx];
if (i == n_lines-2)
array[(i+1)*n_columns+index_dydx] = 2.*dy2/dx2 - array[i*n_columns+index_dydx];
}
return _SUCCESS_;
}
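/* Note on the formula above: with weight1 = dx2^2 and weight2 = dx1^2 it
 * reduces to (dx2^2*dy1 + dx1^2*dy2) / (dx1*dx2*(dx1+dx2)), which is the
 * exact slope at the middle point of the parabola through
 * (x_{i-1},y_{i-1}), (x_i,y_i), (x_{i+1},y_{i+1}): a second-order accurate
 * central difference on a non-uniform grid. */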
int array_derive_spline(
double * x_array,
int n_lines,
double * array,
double * array_splined,
int n_columns,
int index_y,
int index_dydx,
ErrorMsg errmsg) {
int i;
double h;
class_test(index_dydx == index_y,
errmsg,
"Output column %d must differ from input columns %d",
index_dydx,
index_y);
class_test(n_lines<2,
errmsg,
"no possible derivation with less than two lines");
for (i=0; i<n_lines-1; i++) {
h = x_array[i+1] - x_array[i];
if (h == 0) {
sprintf(errmsg,"%s(L:%d) h=0, stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
array[i*n_columns+index_dydx] =
(array[(i+1)*n_columns+index_y] - array[i*n_columns+index_y])/h
- h / 6. * (array_splined[(i+1)*n_columns+index_y] + 2. * array_splined[i*n_columns+index_y]);
}
h = x_array[n_lines-1] - x_array[n_lines-2];
array[(n_lines-1)*n_columns+index_dydx] =
(array[(n_lines-1)*n_columns+index_y] - array[(n_lines-2)*n_columns+index_y])/h
+ h / 6. * (2. * array_splined[(n_lines-1)*n_columns+index_y] + array_splined[(n_lines-2)*n_columns+index_y]);
return _SUCCESS_;
}
int array_derive_spline_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_ddy,
int index_dy,
ErrorMsg errmsg) {
int i;
double h;
class_test(index_ddy == index_y,
errmsg,
"Output column %d must differ from input columns %d",
index_ddy,
index_y);
class_test(index_ddy == index_dy,
errmsg,
"Output column %d must differ from input columns %d",
index_ddy,
index_dy);
class_test(n_lines<2,
errmsg,
"no possible derivation with less than two lines");
for (i=0; i<n_lines-1; i++) {
h = x_array[i+1] - x_array[i];
if (h == 0) {
sprintf(errmsg,"%s(L:%d) h=0, stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
array[i*n_columns+index_dy] =
(array[(i+1)*n_columns+index_y] - array[i*n_columns+index_y])/h
- h / 6. * (array[(i+1)*n_columns+index_ddy] + 2. * array[i*n_columns+index_ddy]);
}
h = x_array[n_lines-1] - x_array[n_lines-2];
array[(n_lines-1)*n_columns+index_dy] =
(array[(n_lines-1)*n_columns+index_y] - array[(n_lines-2)*n_columns+index_y])/h
+ h / 6. * (2. * array[(n_lines-1)*n_columns+index_ddy] + array[(n_lines-2)*n_columns+index_ddy]);
return _SUCCESS_;
}
int array_derive1_order2_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_dy,
ErrorMsg errmsg) {
int i=1;
double dxp,dxm,dyp,dym;
if (n_lines < 3) {
sprintf(errmsg,"%s(L:%d) routine called with n_lines=%d, should be at least 3",__func__,__LINE__,n_lines);
return _FAILURE_;
}
dxp = x_array[2] - x_array[1];
dxm = x_array[0] - x_array[1];
dyp = *(array+2*n_columns+index_y) - *(array+1*n_columns+index_y);
dym = *(array+0*n_columns+index_y) - *(array+1*n_columns+index_y);
if ((dxp*dxm*(dxm-dxp)) == 0.) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+1*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));
*(array+0*n_columns+index_dy) = *(array+1*n_columns+index_dy)
- (x_array[1] - x_array[0]) * 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));
for (i=2; i<n_lines-1; i++) {
dxp = x_array[i+1] - x_array[i];
dxm = x_array[i-1] - x_array[i];
dyp = *(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y);
dym = *(array+(i-1)*n_columns+index_y) - *(array+i*n_columns+index_y);
if ((dxp*dxm*(dxm-dxp)) == 0.) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+i*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));
}
*(array+(n_lines-1)*n_columns+index_dy) = *(array+(n_lines-2)*n_columns+index_dy)
+ (x_array[n_lines-1] - x_array[n_lines-2]) * 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));
return _SUCCESS_;
}
int array_derive2_order2_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_dy,
int index_ddy,
ErrorMsg errmsg) {
int i;
double dxp,dxm,dyp,dym;
if (n_lines < 3) {
sprintf(errmsg,"%s(L:%d) routine called with n_lines=%d, should be at least 3",__func__,__LINE__,n_lines);
return _FAILURE_;
}
for (i=1; i<n_lines-1; i++) {
dxp = x_array[i+1] - x_array[i];
dxm = x_array[i-1] - x_array[i];
dyp = *(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y);
dym = *(array+(i-1)*n_columns+index_y) - *(array+i*n_columns+index_y);
if ((dxp*dxm*(dxm-dxp)) == 0.) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+i*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));
*(array+i*n_columns+index_ddy) = 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));
}
*(array+0*n_columns+index_dy) = *(array+1*n_columns+index_dy)
- (x_array[1] - x_array[0]) * *(array+1*n_columns+index_ddy);
*(array+0*n_columns+index_ddy) = *(array+1*n_columns+index_ddy);
*(array+(n_lines-1)*n_columns+index_dy) = *(array+(n_lines-2)*n_columns+index_dy)
+ (x_array[n_lines-1] - x_array[n_lines-2]) * *(array+(n_lines-2)*n_columns+index_ddy);
*(array+(n_lines-1)*n_columns+index_ddy) = *(array+(n_lines-2)*n_columns+index_ddy);
return _SUCCESS_;
}
int array_integrate_spline_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_ddy,
int index_inty,
ErrorMsg errmsg) {
int i;
double h;
*(array+0*n_columns+index_inty) = 0.;
for (i=0; i < n_lines-1; i++) {
h = (x_array[i+1]-x_array[i]);
*(array+(i+1)*n_columns+index_inty) = *(array+i*n_columns+index_inty) +
(array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.-
(array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.;
}
return _SUCCESS_;
}
/**
* Not called.
*/
int array_derive_two(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_dydx,
int index_ddydxdx,
ErrorMsg errmsg) {
int i;
double dx1,dx2,dy1,dy2,weight1,weight2;
if ((index_dydx == index_x) || (index_dydx == index_y)) {
sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d and %d",__func__,__LINE__,index_dydx,index_x,index_y);
return _FAILURE_;
}
dx2=*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x);
dy2=*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y);
for (i=1; i<n_lines-1; i++) {
dx1 = dx2;
dy1 = dy2;
dx2 = *(array+(i+1)*n_columns+index_x)-*(array+i*n_columns+index_x);
dy2 = *(array+(i+1)*n_columns+index_y)-*(array+i*n_columns+index_y);
weight1 = dx2*dx2;
weight2 = dx1*dx1;
if ((dx1 == 0.) || (dx2 == 0.)) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+i*n_columns+index_dydx) = (weight1*dy1+weight2*dy2) / (weight1*dx1+weight2*dx2);
*(array+i*n_columns+index_ddydxdx) = (dx2*dy1-dx1*dy2) / (weight1*dx1+weight2*dx2);
if (i == 1) {
*(array+(i-1)*n_columns+index_dydx) = 2.*dy1/dx1 - *(array+i*n_columns+index_dydx);
*(array+(i-1)*n_columns+index_ddydxdx) = *(array+i*n_columns+index_ddydxdx);
}
if (i == n_lines-2) {
*(array+(i+1)*n_columns+index_dydx) = 2.*dy2/dx2 - *(array+i*n_columns+index_dydx);
*(array+(i+1)*n_columns+index_ddydxdx) = *(array+i*n_columns+index_ddydxdx);
}
}
return _SUCCESS_;
}
int array_spline(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_ddydx2,
short spline_mode,
ErrorMsg errmsg) {
int i,k;
double p,qn,sig,un;
double * u;
double dy_first;
double dy_last;
if (n_lines < 3) {
sprintf(errmsg,"%s(L:%d) n_lines=%d, while routine needs n_lines >= 3",__func__,__LINE__,n_lines);
return _FAILURE_;
}
u = malloc((n_lines-1) * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (spline_mode == _SPLINE_NATURAL_) {
*(array+0*n_columns+index_ddydx2) = u[0] = 0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_first =
((*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y))-
(*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+2*n_columns+index_y)-*(array+0*n_columns+index_y)))/
((*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+2*n_columns+index_x)-*(array+1*n_columns+index_x)));
*(array+0*n_columns+index_ddydx2) = -0.5;
u[0] =
(3./(*(array+1*n_columns+index_x) - *(array+0*n_columns+index_x)))*
((*(array+1*n_columns+index_y) - *(array+0*n_columns+index_y))/
(*(array+1*n_columns+index_x) - *(array+0*n_columns+index_x))
-dy_first);
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
for (i=1; i < n_lines-1; i++) {
sig = (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x))
/ (*(array+(i+1)*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
p = sig * *(array+(i-1)*n_columns+index_ddydx2) + 2.0;
*(array+i*n_columns+index_ddydx2) = (sig-1.0)/p;
u[i] = (*(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y))
/ (*(array+(i+1)*n_columns+index_x) - *(array+i*n_columns+index_x))
- (*(array+i*n_columns+index_y) - *(array+(i-1)*n_columns+index_y))
/ (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
u[i]= (6.0 * u[i] /
(*(array+(i+1)*n_columns+index_x) - *(array+(i-1)*n_columns+index_x))
- sig * u[i-1]) / p;
}
if (spline_mode == _SPLINE_NATURAL_) {
qn=0.;
un=0.;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_last =
((*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-2)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y))-
(*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-3)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y)))/
((*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-2)*n_columns+index_x)));
qn=0.5;
un =
(3./(*(array+(n_lines-1)*n_columns+index_x) - *(array+(n_lines-2)*n_columns+index_x)))*
(dy_last-(*(array+(n_lines-1)*n_columns+index_y) - *(array+(n_lines-2)*n_columns+index_y))/
(*(array+(n_lines-1)*n_columns+index_x) - *(array+(n_lines-2)*n_columns+index_x)));
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
*(array+(n_lines-1)*n_columns+index_ddydx2) =
(un-qn*u[n_lines-2])/(qn* *(array+(n_lines-2)*n_columns+index_ddydx2)+1.0);
for (k=n_lines-2; k>=0; k--)
*(array+k*n_columns+index_ddydx2) = *(array+k*n_columns+index_ddydx2) *
*(array+(k+1)*n_columns+index_ddydx2) + u[k];
free(u);
return _SUCCESS_;
}
int array_spline_table_line_to_line(
double * x, /* vector of size x_size */
int n_lines,
double * array,
int n_columns,
int index_y,
int index_ddydx2,
short spline_mode,
ErrorMsg errmsg) {
int i,k;
double p,qn,sig,un;
double * u;
double dy_first;
double dy_last;
u = malloc((n_lines-1) * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (spline_mode == _SPLINE_NATURAL_) {
*(array+0*n_columns+index_ddydx2) = u[0] = 0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y))-
(x[1]-x[0])*(x[1]-x[0])*
(*(array+2*n_columns+index_y)-*(array+0*n_columns+index_y)))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
*(array+0*n_columns+index_ddydx2) = -0.5;
u[0] =
(3./(x[1] - x[0]))*
((*(array+1*n_columns+index_y) - *(array+0*n_columns+index_y))/
(x[1] - x[0])-dy_first);
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
for (i=1; i < n_lines-1; i++) {
sig = (x[i] - x[i-1]) / (x[i+1] - x[i-1]);
p = sig * *(array+(i-1)*n_columns+index_ddydx2) + 2.0;
*(array+i*n_columns+index_ddydx2) = (sig-1.0)/p;
u[i] = (*(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y))
/ (x[i+1] - x[i])
- (*(array+i*n_columns+index_y) - *(array+(i-1)*n_columns+index_y))
/ (x[i] - x[i-1]);
u[i]= (6.0 * u[i] /
(x[i+1] - x[i-1])
- sig * u[i-1]) / p;
}
if (spline_mode == _SPLINE_NATURAL_) {
qn=0.;
un=0.;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_last =
((x[n_lines-3]-x[n_lines-1])*(x[n_lines-3]-x[n_lines-1])*
(*(array+(n_lines-2)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y))-
(x[n_lines-2]-x[n_lines-1])*(x[n_lines-2]-x[n_lines-1])*
(*(array+(n_lines-3)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y)))/
((x[n_lines-3]-x[n_lines-1])*(x[n_lines-2]-x[n_lines-1])*(x[n_lines-3]-x[n_lines-2]));
qn=0.5;
un =
(3./(x[n_lines-1] - x[n_lines-2]))*
(dy_last-(*(array+(n_lines-1)*n_columns+index_y) - *(array+(n_lines-2)*n_columns+index_y))/
(x[n_lines-1] - x[n_lines-2]));
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
*(array+(n_lines-1)*n_columns+index_ddydx2) =
(un-qn*u[n_lines-2])/(qn* *(array+(n_lines-2)*n_columns+index_ddydx2)+1.0);
for (k=n_lines-2; k>=0; k--)
*(array+k*n_columns+index_ddydx2) = *(array+k*n_columns+index_ddydx2) *
*(array+(k+1)*n_columns+index_ddydx2) + u[k];
free(u);
return _SUCCESS_;
}
int array_spline_table_lines(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_x*y_size+index_y] */
int y_size,
double * ddy_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double * p;
double * qn;
double * un;
double * u;
double sig;
int index_x;
int index_y;
double dy_first;
double dy_last;
u = malloc((x_size-1) * y_size * sizeof(double));
p = malloc(y_size * sizeof(double));
qn = malloc(y_size * sizeof(double));
un = malloc(y_size * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (p == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
return _FAILURE_;
}
if (qn == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
return _FAILURE_;
}
if (un == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_x*y_size+index_y] = u[index_x*y_size+index_y] = 0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(y_array[1*y_size+index_y]-y_array[0*y_size+index_y])-
(x[1]-x[0])*(x[1]-x[0])*
(y_array[2*y_size+index_y]-y_array[0*y_size+index_y]))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
ddy_array[index_x*y_size+index_y] = -0.5;
u[index_x*y_size+index_y] =
(3./(x[1] - x[0]))*
((y_array[1*y_size+index_y]-y_array[0*y_size+index_y])/
(x[1] - x[0])-dy_first);
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);
for (index_y=0; index_y < y_size; index_y++) {
p[index_y] = sig * ddy_array[(index_x-1)*y_size+index_y] + 2.0;
ddy_array[index_x*y_size+index_y] = (sig-1.0)/p[index_y];
u[index_x*y_size+index_y] =
(y_array[(index_x+1)*y_size+index_y] - y_array[index_x*y_size+index_y])
/ (x[index_x+1] - x[index_x])
- (y_array[index_x*y_size+index_y] - y_array[(index_x-1)*y_size+index_y])
/ (x[index_x] - x[index_x-1]);
u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
(x[index_x+1] - x[index_x-1])
- sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
}
}
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
qn[index_y]=un[index_y]=0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
dy_last =
((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
(y_array[(x_size-2)*y_size+index_y]-y_array[(x_size-1)*y_size+index_y])-
(x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
(y_array[(x_size-3)*y_size+index_y]-y_array[(x_size-1)*y_size+index_y]))/
((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));
qn[index_y]=0.5;
un[index_y]=
(3./(x[x_size-1] - x[x_size-2]))*
(dy_last-(y_array[(x_size-1)*y_size+index_y] - y_array[(x_size-2)*y_size+index_y])/
(x[x_size-1] - x[x_size-2]));
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
index_x=x_size-1;
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_x*y_size+index_y] =
(un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
(qn[index_y] * ddy_array[(index_x-1)*y_size+index_y] + 1.0);
}
for (index_x=x_size-2; index_x >= 0; index_x--) {
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_x*y_size+index_y] = ddy_array[index_x*y_size+index_y] *
ddy_array[(index_x+1)*y_size+index_y] + u[index_x*y_size+index_y];
}
}
free(qn);
free(un);
free(p);
free(u);
return _SUCCESS_;
}
int array_logspline_table_lines(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_x*y_size+index_y] */
int y_size,
double * ddlny_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double * p;
double * qn;
double * un;
double * u;
double sig;
int index_x;
int index_y;
double dy_first;
double dy_last;
u = malloc((x_size-1) * y_size * sizeof(double));
p = malloc(y_size * sizeof(double));
qn = malloc(y_size * sizeof(double));
un = malloc(y_size * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (p == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
return _FAILURE_;
}
if (qn == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
return _FAILURE_;
}
if (un == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
ddlny_array[index_x*y_size+index_y] = u[index_x*y_size+index_y] = 0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
dy_first =
((log(x[2])-log(x[0]))*(log(x[2])-log(x[0]))*
(log(y_array[1*y_size+index_y])-log(y_array[0*y_size+index_y]))-
(log(x[1])-log(x[0]))*(log(x[1])-log(x[0]))*
(log(y_array[2*y_size+index_y])-log(y_array[0*y_size+index_y])))/
((log(x[2])-log(x[0]))*(log(x[1])-log(x[0]))*(log(x[2])-log(x[1])));
ddlny_array[index_x*y_size+index_y] = -0.5;
u[index_x*y_size+index_y] =
(3./(log(x[1]) - log(x[0])))*
((log(y_array[1*y_size+index_y])-log(y_array[0*y_size+index_y]))/
(log(x[1]) - log(x[0]))-dy_first);
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (log(x[index_x]) - log(x[index_x-1]))/(log(x[index_x+1]) - log(x[index_x-1]));
for (index_y=0; index_y < y_size; index_y++) {
p[index_y] = sig * ddlny_array[(index_x-1)*y_size+index_y] + 2.0;
ddlny_array[index_x*y_size+index_y] = (sig-1.0)/p[index_y];
u[index_x*y_size+index_y] =
(log(y_array[(index_x+1)*y_size+index_y]) - log(y_array[index_x*y_size+index_y]))
/ (log(x[index_x+1]) - log(x[index_x]))
- (log(y_array[index_x*y_size+index_y]) - log(y_array[(index_x-1)*y_size+index_y]))
/ (log(x[index_x]) - log(x[index_x-1]));
u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
(log(x[index_x+1]) - log(x[index_x-1]))
- sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
}
}
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
qn[index_y]=un[index_y]=0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
dy_last =
((log(x[x_size-3])-log(x[x_size-1]))*(log(x[x_size-3])-log(x[x_size-1]))*
(log(y_array[(x_size-2)*y_size+index_y])-log(y_array[(x_size-1)*y_size+index_y]))-
(log(x[x_size-2])-log(x[x_size-1]))*(log(x[x_size-2])-log(x[x_size-1]))*
(log(y_array[(x_size-3)*y_size+index_y])-log(y_array[(x_size-1)*y_size+index_y])))/
((log(x[x_size-3])-log(x[x_size-1]))*(log(x[x_size-2])-log(x[x_size-1]))*(log(x[x_size-3])-log(x[x_size-2])));
qn[index_y]=0.5;
un[index_y]=
(3./(log(x[x_size-1]) - log(x[x_size-2])))*
(dy_last-(log(y_array[(x_size-1)*y_size+index_y]) - log(y_array[(x_size-2)*y_size+index_y]))/
(log(x[x_size-1]) - log(x[x_size-2])));
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
index_x=x_size-1;
for (index_y=0; index_y < y_size; index_y++) {
ddlny_array[index_x*y_size+index_y] =
(un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
(qn[index_y] * ddlny_array[(index_x-1)*y_size+index_y] + 1.0);
}
for (index_x=x_size-2; index_x >= 0; index_x--) {
for (index_y=0; index_y < y_size; index_y++) {
ddlny_array[index_x*y_size+index_y] = ddlny_array[index_x*y_size+index_y] *
ddlny_array[(index_x+1)*y_size+index_y] + u[index_x*y_size+index_y];
}
}
free(qn);
free(un);
free(p);
free(u);
return _SUCCESS_;
}
int array_spline_table_columns(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
double * ddy_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double * p;
double * qn;
double * un;
double * u;
double sig;
int index_x;
int index_y;
double dy_first;
double dy_last;
u = malloc((x_size-1) * y_size * sizeof(double));
p = malloc(y_size * sizeof(double));
qn = malloc(y_size * sizeof(double));
un = malloc(y_size * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (p == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
return _FAILURE_;
}
if (qn == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
return _FAILURE_;
}
if (un == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_y*x_size+index_x] = 0.0;
u[index_x*y_size+index_y] = 0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
class_test(x[2]-x[0]==0.,
errmsg,
"x[2]=%g, x[0]=%g, stop to avoid seg fault",x[2],x[0]);
class_test(x[1]-x[0]==0.,
errmsg,
"x[1]=%g, x[0]=%g, stop to avoid seg fault",x[1],x[0]);
class_test(x[2]-x[1]==0.,
errmsg,
"x[2]=%g, x[1]=%g, stop to avoid seg fault",x[2],x[1]);
for (index_y=0; index_y < y_size; index_y++) {
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
(x[1]-x[0])*(x[1]-x[0])*
(y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
ddy_array[index_y*x_size+index_x] = -0.5;
u[index_x*y_size+index_y] =
(3./(x[1] - x[0]))*
((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
(x[1] - x[0])-dy_first);
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);
for (index_y=0; index_y < y_size; index_y++) {
p[index_y] = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;
ddy_array[index_y*x_size+index_x] = (sig-1.0)/p[index_y];
u[index_x*y_size+index_y] =
(y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
/ (x[index_x+1] - x[index_x])
- (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
/ (x[index_x] - x[index_x-1]);
u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
(x[index_x+1] - x[index_x-1])
- sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
}
}
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
qn[index_y]=un[index_y]=0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
dy_last =
((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
(x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));
qn[index_y]=0.5;
un[index_y]=
(3./(x[x_size-1] - x[x_size-2]))*
(dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
(x[x_size-1] - x[x_size-2]));
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
index_x=x_size-1;
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_y*x_size+index_x] =
(un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
(qn[index_y] * ddy_array[index_y*x_size+(index_x-1)] + 1.0);
}
for (index_x=x_size-2; index_x >= 0; index_x--) {
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
ddy_array[index_y*x_size+(index_x+1)] + u[index_x*y_size+index_y];
}
}
free(qn);
free(p);
free(u);
free(un);
return _SUCCESS_;
}
int array_spline_table_columns2(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
double * ddy_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double * p;
double * qn;
double * un;
double * u;
double sig;
int index_x;
int index_y;
double dy_first;
double dy_last;
u = malloc((x_size-1) * y_size * sizeof(double));
p = malloc(y_size * sizeof(double));
qn = malloc(y_size * sizeof(double));
un = malloc(y_size * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (p == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
return _FAILURE_;
}
if (qn == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
return _FAILURE_;
}
if (un == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
#pragma omp parallel \
shared(x,x_size,y_array,y_size,ddy_array,spline_mode,p,qn,un,u) \
private(index_y,index_x,sig,dy_first,dy_last)
{
#pragma omp for schedule (dynamic)
for (index_y=0; index_y < y_size; index_y++) {
if (spline_mode == _SPLINE_NATURAL_) {
ddy_array[index_y*x_size+0] = 0.0;
u[0*y_size+index_y] = 0.0;
}
else {
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
(x[1]-x[0])*(x[1]-x[0])*
(y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
ddy_array[index_y*x_size+0] = -0.5;
u[0*y_size+index_y] =
(3./(x[1] - x[0]))*
((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
(x[1] - x[0])-dy_first);
}
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);
p[index_y] = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;
ddy_array[index_y*x_size+index_x] = (sig-1.0)/p[index_y];
u[index_x*y_size+index_y] =
(y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
/ (x[index_x+1] - x[index_x])
- (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
/ (x[index_x] - x[index_x-1]);
u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
(x[index_x+1] - x[index_x-1])
- sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
}
if (spline_mode == _SPLINE_NATURAL_) {
qn[index_y]=un[index_y]=0.0;
}
else {
dy_last =
((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
(x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));
qn[index_y]=0.5;
un[index_y]=
(3./(x[x_size-1] - x[x_size-2]))*
(dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
(x[x_size-1] - x[x_size-2]));
}
index_x=x_size-1;
ddy_array[index_y*x_size+index_x] =
(un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
(qn[index_y] * ddy_array[index_y*x_size+(index_x-1)] + 1.0);
for (index_x=x_size-2; index_x >= 0; index_x--) {
ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
ddy_array[index_y*x_size+(index_x+1)] + u[index_x*y_size+index_y];
}
}
}
free(qn);
free(p);
free(u);
free(un);
return _SUCCESS_;
}
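/* Note on the parallel version above: the omp for loop is distributed over
 * index_y, so each thread owns one whole column at a time; the scratch slots
 * p[index_y], qn[index_y], un[index_y] and the u/ddy_array entries it touches
 * all carry that same index_y, which is why the shared scratch arrays need no
 * further synchronization. */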
int array_spline_table_one_column(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddy_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double p;
double qn;
double un;
double * u;
double sig;
int index_x;
double dy_first;
double dy_last;
u = malloc((x_size-1) * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
/************************************************/
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
ddy_array[index_y*x_size+index_x] = 0.0;
u[index_x] = 0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
(x[1]-x[0])*(x[1]-x[0])*
(y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
ddy_array[index_y*x_size+index_x] = -0.5;
u[index_x] =
(3./(x[1] - x[0]))*
((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
(x[1] - x[0])-dy_first);
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
/************************************************/
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);
p = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;
ddy_array[index_y*x_size+index_x] = (sig-1.0)/p;
u[index_x] =
(y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
/ (x[index_x+1] - x[index_x])
- (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
/ (x[index_x] - x[index_x-1]);
u[index_x] = (6.0 * u[index_x] /
(x[index_x+1] - x[index_x-1])
- sig * u[index_x-1]) / p;
}
/************************************************/
if (spline_mode == _SPLINE_NATURAL_) {
qn=un=0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_last =
((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
(x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));
qn=0.5;
un=
(3./(x[x_size-1] - x[x_size-2]))*
(dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
(x[x_size-1] - x[x_size-2]));
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
/************************************************/
index_x=x_size-1;
ddy_array[index_y*x_size+index_x] =
(un - qn * u[index_x-1]) /
(qn * ddy_array[index_y*x_size+(index_x-1)] + 1.0);
for (index_x=x_size-2; index_x >= 0; index_x--) {
ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
ddy_array[index_y*x_size+(index_x+1)] + u[index_x];
}
free(u);
return _SUCCESS_;
}
int array_logspline_table_one_column(
double * x, /* vector of size x_size */
int x_size,
int x_stop,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddlogy_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double p;
double qn;
double un;
double * u;
double sig;
int index_x;
double dy_first;
double dy_last;
u = malloc((x_stop-1) * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
/************************************************/
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
ddlogy_array[index_y*x_size+index_x] = 0.0;
u[index_x] = 0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_first =
((log(x[2])-log(x[0]))*(log(x[2])-log(x[0]))*
(log(y_array[index_y*x_size+1])-log(y_array[index_y*x_size+0]))-
(log(x[1])-log(x[0]))*(log(x[1])-log(x[0]))*
(log(y_array[index_y*x_size+2])-log(y_array[index_y*x_size+0])))/
((log(x[2])-log(x[0]))*(log(x[1])-log(x[0]))*(log(x[2])-log(x[1])));
ddlogy_array[index_y*x_size+index_x] = -0.5;
u[index_x] =
(3./(log(x[1]) - log(x[0])))*
((log(y_array[index_y*x_size+1])-log(y_array[index_y*x_size+0]))/
(log(x[1]) - log(x[0]))-dy_first);
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
/************************************************/
for (index_x=1; index_x < x_stop-1; index_x++) {
sig = (log(x[index_x]) - log(x[index_x-1]))/(log(x[index_x+1]) - log(x[index_x-1]));
p = sig * ddlogy_array[index_y*x_size+(index_x-1)] + 2.0;
ddlogy_array[index_y*x_size+index_x] = (sig-1.0)/p;
u[index_x] =
(log(y_array[index_y*x_size+(index_x+1)]) - log(y_array[index_y*x_size+index_x]))
/ (log(x[index_x+1]) - log(x[index_x]))
- (log(y_array[index_y*x_size+index_x]) - log(y_array[index_y*x_size+(index_x-1)]))
/ (log(x[index_x]) - log(x[index_x-1]));
u[index_x] = (6.0 * u[index_x] /
(log(x[index_x+1]) - log(x[index_x-1]))
- sig * u[index_x-1]) / p;
}
/************************************************/
if (spline_mode == _SPLINE_NATURAL_) {
qn=un=0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_last =
((log(x[x_stop-3])-log(x[x_stop-1]))*(log(x[x_stop-3])-log(x[x_stop-1]))*
(log(y_array[index_y*x_size+(x_stop-2)])-log(y_array[index_y*x_size+(x_stop-1)]))-
(log(x[x_stop-2])-log(x[x_stop-1]))*(log(x[x_stop-2])-log(x[x_stop-1]))*
(log(y_array[index_y*x_size+(x_stop-3)])-log(y_array[index_y*x_size+(x_stop-1)])))/
((log(x[x_stop-3])-log(x[x_stop-1]))*(log(x[x_stop-2])-log(x[x_stop-1]))*
(log(x[x_stop-3])-log(x[x_stop-2])));
qn=0.5;
un=
(3./(log(x[x_stop-1]) - log(x[x_stop-2])))*
(dy_last-(log(y_array[index_y*x_size+(x_stop-1)]) - log(y_array[index_y*x_size+(x_stop-2)]))/
(log(x[x_stop-1]) - log(x[x_stop-2])));
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
/************************************************/
index_x=x_stop-1;
ddlogy_array[index_y*x_size+index_x] =
(un - qn * u[index_x-1]) /
(qn * ddlogy_array[index_y*x_size+(index_x-1)] + 1.0);
for (index_x=x_stop-2; index_x >= 0; index_x--) {
ddlogy_array[index_y*x_size+index_x] = ddlogy_array[index_y*x_size+index_x] *
ddlogy_array[index_y*x_size+(index_x+1)] + u[index_x];
}
free(u);
return _SUCCESS_;
}
int array_integrate_all_spline(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_ddy,
double * result,
ErrorMsg errmsg) {
int i;
double h;
*result = 0;
for (i=0; i < n_lines-1; i++) {
h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);
*result +=
(array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.-
(array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.;
}
return _SUCCESS_;
}
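/* Sign of the h^3/24 term above: on [x_i, x_{i+1}] the spline reads
 * y = a*y_i + b*y_{i+1} + ((a^3-a)*ddy_i + (b^3-b)*ddy_{i+1})*h^2/6
 * with b = (x-x_i)/h and a = 1-b (the formula used in
 * array_interpolate_spline below). Since int_0^1 (b^3-b) db = 1/4 - 1/2
 * = -1/4, each segment contributes h*(y_i+y_{i+1})/2
 * - h^3*(ddy_i+ddy_{i+1})/24, hence the minus sign. */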
int array_integrate_all_trapzd_or_spline(
double * array,
int n_columns,
int n_lines,
int index_start_spline,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_ddy,
double * result,
ErrorMsg errmsg) {
int i;
double h;
if ((index_start_spline<0) || (index_start_spline>=n_lines)) {
sprintf(errmsg,"%s(L:%d) index_start_spline outside of range",__func__,__LINE__);
return _FAILURE_;
}
*result = 0;
/* trapezoidal integration till given index */
for (i=0; i < index_start_spline; i++) {
h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);
*result +=
(array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.;
}
/* then, spline integration */
for (i=index_start_spline; i < n_lines-1; i++) {
h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);
*result +=
(array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.-
(array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.;
}
return _SUCCESS_;
}
/**
* Not called.
*/
int array_integrate(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_int_y_dx,
ErrorMsg errmsg) {
int i;
double sum;
if ((index_int_y_dx == index_x) || (index_int_y_dx == index_y)) {
sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d and %d",__func__,__LINE__,index_int_y_dx,index_x,index_y);
return _FAILURE_;
}
sum=0.;
*(array+0*n_columns+index_int_y_dx)=sum;
for (i=1; i<n_lines; i++) {
sum += 0.5 * (*(array+i*n_columns+index_y) + *(array+(i-1)*n_columns+index_y))
* (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
*(array+i*n_columns+index_int_y_dx)=sum;
}
return _SUCCESS_;
}
/**
* Called by thermodynamics_init().
*/
int array_integrate_ratio(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y1,
int index_y2,
int index_int_y1_over_y2_dx,
ErrorMsg errmsg) {
int i;
double sum;
if ((index_int_y1_over_y2_dx == index_x) || (index_int_y1_over_y2_dx == index_y1) || (index_int_y1_over_y2_dx == index_y2)) {
sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d, %d and %d",__func__,__LINE__,index_int_y1_over_y2_dx,index_x,index_y1,index_y2);
return _FAILURE_;
}
sum=0.;
*(array+0*n_columns+index_int_y1_over_y2_dx)=sum;
for (i=1; i<n_lines; i++) {
sum += 0.5 * (*(array+i*n_columns+index_y1) / *(array+i*n_columns+index_y2)
+ *(array+(i-1)*n_columns+index_y1) / *(array+(i-1)*n_columns+index_y2))
* (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
*(array+i*n_columns+index_int_y1_over_y2_dx)=sum;
}
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are all columns of the same array
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double weight;
inf=0;
sup=n_lines-1;
if (*(array+inf*n_columns+index_x) < *(array+sup*n_columns+index_x)){
if (x < *(array+inf*n_columns+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array+inf*n_columns+index_x));
return _FAILURE_;
}
if (x > *(array+sup*n_columns+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array+sup*n_columns+index_x));
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < *(array+mid*n_columns+index_x)) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < *(array+sup*n_columns+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array+sup*n_columns+index_x));
return _FAILURE_;
}
if (x > *(array+inf*n_columns+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array+inf*n_columns+index_x));
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > *(array+mid*n_columns+index_x)) {sup=mid;}
else {inf=mid;}
}
}
*last_index = inf;
weight=(x-*(array+inf*n_columns+index_x))/(*(array+sup*n_columns+index_x)-*(array+inf*n_columns+index_x));
for (i=0; i<result_size; i++)
*(result+i) = *(array+inf*n_columns+i) * (1.-weight)
+ weight * *(array+sup*n_columns+i);
*(result+index_x) = x;
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_spline(
double * __restrict__ x_array,
int n_lines,
double * __restrict__ array,
double * __restrict__ array_splined,
int n_columns,
double x,
int * __restrict__ last_index,
double * __restrict__ result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double h,a,b;
inf=0;
sup=n_lines-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
*last_index = inf;
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
for (i=0; i<result_size; i++)
*(result+i) =
a * *(array+inf*n_columns+i) +
b * *(array+sup*n_columns+i) +
((a*a*a-a)* *(array_splined+inf*n_columns+i) +
(b*b*b-b)* *(array_splined+sup*n_columns+i))*h*h/6.;
return _SUCCESS_;
}
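/* Note: the evaluation above is the standard cubic-spline formula
 * y(x) = a*y_inf + b*y_sup + [(a^3-a)*y''_inf + (b^3-b)*y''_sup] * h^2/6,
 * where array_splined is expected to hold the second derivatives y''
 * precomputed by a companion spline routine. */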
/**
 * bisection search: find the index i (stored in last_index) such that c lies between array[i] and array[i+1], for an array sorted in either direction
*
* Called by nonlinear_HMcode()
*/
int array_search_bisect(
int n_lines,
double * __restrict__ array,
double c,
int * __restrict__ last_index,
ErrorMsg errmsg) {
int inf,sup,mid;
inf=0;
sup=n_lines-1;
if (array[inf] < array[sup]){
if (c < array[inf]) {
sprintf(errmsg,"%s(L:%d) : c=%e < y_min=%e",__func__,__LINE__,c,array[inf]);
return _FAILURE_;
}
if (c > array[sup]) {
sprintf(errmsg,"%s(L:%d) : c=%e > y_max=%e",__func__,__LINE__,c,array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (c < array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (c < array[sup]) {
sprintf(errmsg,"%s(L:%d) : c=%e < y_min=%e",__func__,__LINE__,c,array[sup]);
return _FAILURE_;
}
if (c > array[inf]) {
sprintf(errmsg,"%s(L:%d) : c=%e > y_max=%e",__func__,__LINE__,c,array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (c > array[mid]) {sup=mid;}
else {inf=mid;}
}
}
*last_index = inf;
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_linear(
double * x_array,
int n_lines,
double * array,
int n_columns,
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double h,a,b;
inf=0;
sup=n_lines-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
*last_index = inf;
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
for (i=0; i<result_size; i++)
*(result+i) =
a * *(array+inf*n_columns+i) +
b * *(array+sup*n_columns+i);
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_logspline(
double * x_array,
int n_lines,
double * array,
double * array_logsplined,
int n_columns,
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double h,a,b;
inf=0;
sup=n_lines-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
*last_index = inf;
h = log(x_array[sup]) - log(x_array[inf]);
b = (log(x)-log(x_array[inf]))/h;
a = 1-b;
for (i=0; i<result_size; i++)
*(result+i) = exp(
a * log(array[inf*n_columns+i]) +
b * log(array[sup*n_columns+i]) +
((a*a*a-a)* array_logsplined[inf*n_columns+i] +
(b*b*b-b)* array_logsplined[sup*n_columns+i])*h*h/6.);
return _SUCCESS_;
}
/**
 * interpolate with a spline to get y(x) for one column (index_y) of y_array,
 * when x and y are stored in different arrays
*/
int array_interpolate_spline_one_column(
double * x_array,
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddy_array, /* array of size x_size*y_size */
double x, /* input */
double * y, /* output */
ErrorMsg errmsg
) {
int inf,sup,mid;
double h,a,b;
inf=0;
sup=x_size-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
*y =
a * y_array[index_y * x_size + inf] +
b * y_array[index_y * x_size + sup] +
((a*a*a-a)* ddy_array[index_y * x_size + inf] +
(b*b*b-b)* ddy_array[index_y * x_size + sup])*h*h/6.;
return _SUCCESS_;
}
/**
 * interpolate with a spline to get y(x) for one column of y_array,
 * extrapolating linearly when x lies outside the tabulated range
*/
int array_interpolate_extrapolate_spline_one_column(
double * x_array,
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddy_array, /* array of size x_size*y_size */
double x, /* input */
double * y, /* output */
ErrorMsg errmsg
) {
int inf,sup,mid;
double h,a,b;
if (x > x_array[x_size-2] || x < x_array[0]) {
/*interpolate/extrapolate linearly y as a function of x*/
h = x_array[x_size-1] - x_array[x_size-2];
b = (x-x_array[x_size-2])/h;
a = 1-b;
*y = a * y_array[index_y * x_size + (x_size-2)] +
b * y_array[index_y * x_size + (x_size-1)];
}
else {
/*interpolate y as a function of x with a spline*/
inf=0;
sup=x_size-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
*y =
a * y_array[index_y * x_size + inf] +
b * y_array[index_y * x_size + sup] +
((a*a*a-a)* ddy_array[index_y * x_size + inf] +
(b*b*b-b)* ddy_array[index_y * x_size + sup])*h*h/6.;
}
return _SUCCESS_;
}
/**
 * interpolate ln(y) as a spline in ln(x) for one column of y_array,
 * extrapolating log-linearly above x_array[x_stop-1]
*/
int array_interpolate_extrapolate_logspline_loglinear_one_column(
double * x_array,
int x_size,
int x_stop,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddlogy_array, /* array of size x_size*y_size */
double x, /* input */
double * y, /* output */
ErrorMsg errmsg
) {
int inf,sup,mid;
double h,a,b;
if (x > x_array[x_stop-1]) {
/*interpolate/extrapolate linearly ln(y) as a function of ln(x)*/
h = log(x_array[x_stop-1]) - log(x_array[x_stop-2]);
b = (log(x)-log(x_array[x_stop-2]))/h;
a = 1-b;
/* *y = exp(a * log(y_array[index_y * x_size + (x_stop-2)]) + */
/* b * log(y_array[index_y * x_size + (x_stop-1)])); */
*y = exp(log(y_array[index_y * x_size + (x_stop-1)])
+(log(x)-log(x_array[x_stop-1]))
*((log(y_array[index_y * x_size + (x_stop-1)])-log(y_array[index_y * x_size + (x_stop-2)]))/h
+h/6.*(ddlogy_array[index_y * x_size + (x_stop-2)]+2.*ddlogy_array[index_y * x_size + (x_stop-1)])));
}
else {
/*interpolate ln(y) as a function of ln(x) with a spline*/
inf=0;
sup=x_stop-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
h = log(x_array[sup]) - log(x_array[inf]);
b = (log(x)-log(x_array[inf]))/h;
a = 1-b;
*y = exp(a * log(y_array[index_y * x_size + inf]) +
b * log(y_array[index_y * x_size + sup]) +
((a*a*a-a)* ddlogy_array[index_y * x_size + inf] +
(b*b*b-b)* ddlogy_array[index_y * x_size + sup])*h*h/6.);
}
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are all columns of the same array, x is arranged in growing order, and the point x is presumably close to the previous point x from the last call of this function.
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_growing_closeby(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,i;
double weight;
inf = *last_index;
sup = *last_index+1;
while (x < *(array+inf*n_columns+index_x)) {
inf--;
if (inf < 0) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
x,array[index_x]);
return _FAILURE_;
}
}
sup = inf+1;
while (x > *(array+sup*n_columns+index_x)) {
sup++;
if (sup > (n_lines-1)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
x,array[(n_lines-1)*n_columns+index_x]);
return _FAILURE_;
}
}
inf = sup-1;
*last_index = inf;
weight=(x-*(array+inf*n_columns+index_x))/(*(array+sup*n_columns+index_x)-*(array+inf*n_columns+index_x));
for (i=0; i<result_size; i++)
*(result+i) = *(array+inf*n_columns+i) * (1.-weight)
+ weight * *(array+sup*n_columns+i);
*(result+index_x) = x;
return _SUCCESS_;
}
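/* Note: because the search starts from *last_index and moves one line at a
 * time, this variant costs O(1) per call when successive x values are close
 * to each other, instead of the O(log n) bisection of array_interpolate();
 * it is only advantageous when that closeby assumption actually holds. */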
/**
* interpolate to get y(x), when x and y are two columns of the same array, x is arranged in growing order, and the point x is presumably close to the previous point x from the last call of this function.
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_one_growing_closeby(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
double x,
int * last_index,
int index_y,
double * result,
ErrorMsg errmsg) {
int inf,sup;
double weight;
inf = *last_index;
sup = *last_index+1;
while (x < *(array+inf*n_columns+index_x)) {
inf--;
if (inf < 0) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
x,array[index_x]);
return _FAILURE_;
}
}
sup = inf+1;
while (x > *(array+sup*n_columns+index_x)) {
sup++;
if (sup > (n_lines-1)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
x,array[(n_lines-1)*n_columns+index_x]);
return _FAILURE_;
}
}
inf = sup-1;
*last_index = inf;
weight=(x-*(array+inf*n_columns+index_x))/(*(array+sup*n_columns+index_x)-*(array+inf*n_columns+index_x));
*result = *(array+inf*n_columns+index_y) * (1.-weight) + *(array+sup*n_columns+index_y) * weight;
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are all columns of the same array, x is arranged in growing order, and the point x is presumably very close to the previous point x from the last call of this function.
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_spline_growing_closeby(
double * x_array,
int n_lines,
double * array,
double * array_splined,
int n_columns,
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,i;
double h,a,b;
/*
if (*last_index < 0) {
sprintf(errmsg,"%s(L:%d) problem with last_index =%d < 0",__func__,__LINE__,*last_index);
return _FAILURE_;
}
if (*last_index > (n_lines-1)) {
sprintf(errmsg,"%s(L:%d) problem with last_index =%d > %d",__func__,__LINE__,*last_index,n_lines-1);
return _FAILURE_;
}
*/
inf = *last_index;
class_test(inf<0 || inf>(n_lines-1),
errmsg,
"*lastindex=%d out of range [0:%d]\n",inf,n_lines-1);
while (x < x_array[inf]) {
inf--;
if (inf < 0) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
x,x_array[0]);
return _FAILURE_;
}
}
sup = inf+1;
while (x > x_array[sup]) {
sup++;
if (sup > (n_lines-1)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
x,x_array[n_lines-1]);
return _FAILURE_;
}
}
inf = sup-1;
*last_index = inf;
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
for (i=0; i<result_size; i++)
*(result+i) =
a * *(array+inf*n_columns+i) +
b * *(array+sup*n_columns+i) +
((a*a*a-a)* *(array_splined+inf*n_columns+i) +
(b*b*b-b)* *(array_splined+sup*n_columns+i))*h*h/6.;
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are all columns of the same array, x is arranged in growing order, and the point x is presumably close (but maybe not so close) to the previous point x from the last call of this function.
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_spline_growing_hunt(
double * x_array,
int n_lines,
double * array,
double * array_splined,
int n_columns,
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,mid,i,inc;
double h,a,b;
inc=1;
if (x >= x_array[*last_index]) {
if (x > x_array[n_lines-1]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
x,x_array[n_lines-1]);
return _FAILURE_;
}
/* try closest neighbor upward */
inf = *last_index;
sup = inf + inc;
if (x > x_array[sup]) {
/* hunt upward */
while (x > x_array[sup]) {
inf = sup;
inc += 1;
sup += inc;
if (sup > n_lines-1) {
sup = n_lines-1;
}
}
/* bisect */
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
}
else {
if (x < x_array[0]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
x,x_array[0]);
return _FAILURE_;
}
/* try closest neighbor downward */
sup = *last_index;
inf = sup - inc;
if (x < x_array[inf]) {
/* hunt downward */
while (x < x_array[inf]) {
sup = inf;
inc += 1;
inf -= inc;
if (inf < 0) {
inf = 0;
}
}
/* bisect */
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
}
*last_index = inf;
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
for (i=0; i<result_size; i++)
*(result+i) =
a * *(array+inf*n_columns+i) +
b * *(array+sup*n_columns+i) +
((a*a*a-a)* *(array_splined+inf*n_columns+i) +
(b*b*b-b)* *(array_splined+sup*n_columns+i))*h*h/6.;
return _SUCCESS_;
}
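/* The "hunt" phase above grows the step size on every miss (probing offsets
 * 1, 3, 6, 10, ... from the last position) and only then bisects inside the
 * bracketed window, in the spirit of the classic hunt-then-bisect search:
 * nearly free when x moved little since the last call, and still bounded by
 * a bisection over the bracketed range otherwise. */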
/**
 * interpolate linearly to get y_i(x), when x and y_i are in two different arrays
*
* Called by transfer_interpolate_sources(); transfer_functions_at_k(); perturb_sources_at_eta().
*/
int array_interpolate_two(
double * array_x,
int n_columns_x,
int index_x, /** from 0 to (n_columns_x-1) */
double * array_y,
int n_columns_y,
int n_lines, /** must be the same for array_x and array_y */
double x,
double * result,
int result_size, /** from 1 to n_columns_y */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double weight;
inf=0;
sup=n_lines-1;
if (array_x[inf*n_columns_x+index_x] < array_x[sup*n_columns_x+index_x]){
if (x < array_x[inf*n_columns_x+index_x]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[inf*n_columns_x+index_x]);
return _FAILURE_;
}
if (x > array_x[sup*n_columns_x+index_x]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[sup*n_columns_x+index_x]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < array_x[mid*n_columns_x+index_x]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < *(array_x+sup*n_columns_x+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array_x+sup*n_columns_x+index_x));
return _FAILURE_;
}
if (x > *(array_x+inf*n_columns_x+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array_x+inf*n_columns_x+index_x));
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > *(array_x+mid*n_columns_x+index_x)) {sup=mid;}
else {inf=mid;}
}
}
weight=(x-*(array_x+inf*n_columns_x+index_x))/(*(array_x+sup*n_columns_x+index_x)-*(array_x+inf*n_columns_x+index_x));
for (i=0; i<result_size; i++)
*(result+i) = *(array_y+i*n_lines+inf) * (1.-weight)
+ weight * *(array_y+i*n_lines+sup) ;
return _SUCCESS_;
}
/**
* Same as array_interpolate_two, but with order of indices exchanged in array_y
*/
int array_interpolate_two_bis(
double * array_x,
int n_columns_x,
int index_x, /** from 0 to (n_columns_x-1) */
double * array_y,
int n_columns_y,
int n_lines, /** must be the same for array_x and array_y */
double x,
double * result,
int result_size, /** from 1 to n_columns_y */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double weight;
inf=0;
sup=n_lines-1;
if (array_x[inf*n_columns_x+index_x] < array_x[sup*n_columns_x+index_x]){
if (x < array_x[inf*n_columns_x+index_x]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[inf*n_columns_x+index_x]);
return _FAILURE_;
}
if (x > array_x[sup*n_columns_x+index_x]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[sup*n_columns_x+index_x]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < array_x[mid*n_columns_x+index_x]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < *(array_x+sup*n_columns_x+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array_x+sup*n_columns_x+index_x));
return _FAILURE_;
}
if (x > *(array_x+inf*n_columns_x+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array_x+inf*n_columns_x+index_x));
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > *(array_x+mid*n_columns_x+index_x)) {sup=mid;}
else {inf=mid;}
}
}
weight=(x-*(array_x+inf*n_columns_x+index_x))/(*(array_x+sup*n_columns_x+index_x)-*(array_x+inf*n_columns_x+index_x));
for (i=0; i<result_size; i++)
*(result+i) = *(array_y+inf*n_columns_y+i) * (1.-weight)
+ weight * *(array_y+sup*n_columns_y+i) ;
return _SUCCESS_;
}
/**
 * interpolate linearly to get y_i(x), when x and y_i are in two different arrays
*
* Called by transfer_interpolate_sources(); transfer_functions_at_k(); perturb_sources_at_eta().
*/
int array_interpolate_two_arrays_one_column(
double * array_x, /* assumed to be a vector (i.e. one column array) */
double * array_y,
int n_columns_y,
int index_y, /* between 0 and (n_columns_y-1) */
int n_lines, /** must be the same for array_x and array_y */
double x,
double * result,
ErrorMsg errmsg) {
int inf,sup,mid;
double weight;
double epsilon=1e-9;
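/* epsilon tolerates x lying marginally outside the tabulated range,
   protecting the boundary tests below against round-off error */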
inf=0;
sup=n_lines-1;
if (array_x[inf] < array_x[sup]){
class_test(x < array_x[inf]-epsilon,
errmsg,
"x=%e < x_min=%e",x,array_x[inf]);
class_test(x > array_x[sup]+epsilon,
errmsg,
"x=%e > x_max=%e",x,array_x[sup]);
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < array_x[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
class_test(x < array_x[sup]-epsilon,
errmsg,
"x=%e < x_min=%e",x,array_x[sup]);
class_test(x > array_x[inf]+epsilon,
errmsg,
"x=%e > x_max=%e",x,array_x[inf]);
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > array_x[mid]) {sup=mid;}
else {inf=mid;}
}
}
weight=(x-array_x[inf])/(array_x[sup]-array_x[inf]);
*result = array_y[index_y*n_lines+inf] * (1.-weight)
+ weight * array_y[index_y*n_lines+sup];
return _SUCCESS_;
}
/**
* Called by transfer_solve().
*/
int array_interpolate_equal(
double * array,
int n_columns,
int n_lines,
double x,
double x_min,
double x_max,
double * result,
ErrorMsg errmsg) {
int index_minus,i;
double x_step,x_minus,weight;
if (x < x_min) {
sprintf(errmsg,"%s(L:%d) : x out of bounds: x=%e,x_min=%e",__func__,__LINE__,x,x_min);
return _FAILURE_;
}
if (x > x_max) {
sprintf(errmsg,"%s(L:%d) : x out of bounds: x=%e,x_max=%e",__func__,__LINE__,x,x_max);
return _FAILURE_;
}
x_step = (x_max-x_min)/(n_lines-1);
index_minus = (int)((x-x_min)/x_step);
x_minus = index_minus * x_step;
weight = (x-x_minus) / x_step;
for (i=0; i<n_columns; i++)
result[i] = *(array+n_columns*index_minus+i)*(1.-weight)
+ *(array+n_columns*(index_minus+1)+i)*weight;
return _SUCCESS_;
}
/**
 * cubic interpolation of an array with equally spaced abscissas
*/
int array_interpolate_cubic_equal(
double x0,
double dx,
double *yarray,
int Nx,
double x,
double * result,
ErrorMsg errmsg) {
int i;
double frac;
class_test((dx > 0 && (x<x0 || x>x0+dx*(Nx-1))),
errmsg,
"x=%e out of range [%e %e]",x,x0,x0+dx*(Nx-1));
class_test((dx < 0 && (x>x0 || x<x0+dx*(Nx-1))),
errmsg,
"x=%e out of range [%e %e]",x,x0+dx*(Nx-1),x0);
i = (int)floor((x-x0)/dx);
if (i<1) i=1;
if (i>Nx-3) i=Nx-3;
frac = (x-x0)/dx-i;
yarray += i-1;
*result=-yarray[0]*frac*(1.-frac)*(2.-frac)/6.
+yarray[1]*(1.+frac)*(1.-frac)*(2.-frac)/2.
+yarray[2]*(1.+frac)*frac*(2.-frac)/2.
+yarray[3]*(1.+frac)*frac*(frac-1.)/6.;
return _SUCCESS_;
}
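/* Note: the four weights above are the Lagrange interpolation coefficients
 * for equally spaced nodes at local coordinates -1, 0, 1, 2 (with frac
 * measured from node i), i.e. a 4-point cubic fit through yarray[i-1..i+2]. */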
int array_interpolate_parabola(double x1,
double x2,
double x3,
double x,
double y1,
double y2,
double y3,
double * y,
double * dy,
double * ddy,
ErrorMsg errmsg) {
double a,b,c;
/*
a x_i**2 + b x_i + c = y_i
a (x1**2-x2**2) + b (x1-x2) = y1-y2
a (x3**2-x2**2) + b (x3-x2) = y3-y2
a (x1**2-x2**2)(x3**2-x2**2) + b (x1-x2)(x3**2-x2**2) = (y1-y2)(x3**2-x2**2)
a (x3**2-x2**2)(x1**2-x2**2) + b (x3-x2)(x1**2-x2**2) = (y3-y2)(x1**2-x2**2)
b = [(y1-y2)(x3**2-x2**2) - (y3-y2)(x1**2-x2**2)]/(x1-x2)(x3-x2)(x3-x1)
*/
b = ((y1-y2)*(x3-x2)*(x3+x2) - (y3-y2)*(x1-x2)*(x1+x2))/(x1-x2)/(x3-x2)/(x3-x1);
a = (y1-y2-b*(x1-x2))/(x1-x2)/(x1+x2);
c = y2 - b*x2 - a*x2*x2;
*y = a*x*x + b*x + c;
*dy = 2.*a*x + b;
*ddy = 2.*a;
return _SUCCESS_;
}
/**
* Called by transfer_solve().
*/
int array_integrate_all(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
double *result) {
int i;
double sum;
sum=0.;
for (i=1; i<n_lines; i++) {
sum += 0.5 * (*(array+i*n_columns+index_y) + *(array+(i-1)*n_columns+index_y))
* (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
}
*result = sum;
return _SUCCESS_;
}
int array_smooth_trg(double * array,
int k_size,
int starting_k,
int eta_size,
int index_eta,
int radius, /*3, 5 or 7 */
ErrorMsg errmsg) {
double * smooth;
int i,j,jmin,jmax;
double weigth;
double *coeff;
smooth=malloc(k_size*sizeof(double));
if (smooth == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate smooth",__func__,__LINE__);
return _FAILURE_;
}
class_calloc(coeff,2*radius+1,sizeof(double),errmsg);
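/* The hard-coded kernels below appear to be the classical Savitzky-Golay
 * smoothing coefficients (quadratic/cubic least-squares fit) for windows of
 * 7, 9, 11, 13 and 15 points; `weigth` is the normalization of each kernel. */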
switch(radius){
case 3:
weigth = 21;
coeff[0] = -2;
coeff[1] = 3;
coeff[2] = 6;
coeff[3] = 7;
coeff[4] = 6;
coeff[5] = 3;
coeff[6] = -2;
break;
case 4:
weigth = 231;
coeff[0] = -21;
coeff[1] = 14;
coeff[2] = 39;
coeff[3] = 54;
coeff[4] = 59;
coeff[5] = 54;
coeff[6] = 39;
coeff[7] = 14;
coeff[8] = -21;
break;
case 5:
weigth = 429;
coeff[0] = -36;
coeff[1] = 9;
coeff[2] = 44;
coeff[3] = 69;
coeff[4] = 84;
coeff[5] = 89;
coeff[6] = 84;
coeff[7] = 69;
coeff[8] = 44;
coeff[9] = 9;
coeff[10] = -36;
break;
case 6:
weigth = 143;
coeff[0] = -11;
coeff[1] = 0;
coeff[2] = 9;
coeff[3] = 16;
coeff[4] = 21;
coeff[5] = 24;
coeff[6] = 25;
coeff[7] = 24;
coeff[8] = 21;
coeff[9] = 16;
coeff[10] = 9;
coeff[11] = 0;
coeff[12] = -11;
break;
case 7:
weigth = 1105;
coeff[0] = -78;
coeff[1] = -13;
coeff[2] = 42;
coeff[3] = 87;
coeff[4] = 122;
coeff[5] = 147;
coeff[6] = 162;
coeff[7] = 167;
coeff[8] = 162;
coeff[9] = 147;
coeff[10] = 122;
coeff[11] = 87;
coeff[12] = 42;
coeff[13] = -13;
coeff[14] = -78;
break;
/* case 8: */
default:
class_stop(errmsg,"Invalid radius %d: please choose 3, 4, 5, 6 or 7\n",radius);
weigth=0;
break;
}
for (i=starting_k; i<k_size-radius; i++) {
smooth[i]=0.;
jmin = MAX(i-radius,0);
jmax = MIN(i+radius,k_size-1);
for (j=jmin; j <= jmax; j++) {
smooth[i] += coeff[j-jmin]*array[j+k_size*index_eta];
}
smooth[i] /= weigth;
}
for (i=starting_k; i<k_size-radius; i++)
array[i+k_size*index_eta] = smooth[i];
free(smooth);
free(coeff);
return _SUCCESS_;
}
int array_smooth(double * array,
int n_columns,
int n_lines,
int index, /** from 0 to (n_columns-1) */
int radius,
ErrorMsg errmsg) {
double * smooth;
int i,j,jmin,jmax;
double weigth;
smooth=malloc(n_lines*sizeof(double));
if (smooth == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate smooth",__func__,__LINE__);
return _FAILURE_;
}
for (i=0; i<n_lines; i++) {
smooth[i]=0.;
weigth=0.;
jmin = MAX(i-radius,0);
jmax = MIN(i+radius,n_lines-1);
for (j=jmin; j <= jmax; j++) {
smooth[i] += array[j*n_columns+index];
weigth += 1.;
}
smooth[i] /= weigth;
}
for (i=0; i<n_lines; i++)
array[i*n_columns+index] = smooth[i];
free(smooth);
return _SUCCESS_;
}
/**
 * Compute quadrature weights for the trapezoidal integration method, when x is in growing order.
*
* @param x Input: Grid points on which f() is known.
* @param n Input: number of grid points.
* @param w_trapz Output: Weights of the trapezoidal method.
* @return the error status
*/
int array_trapezoidal_weights(
double * __restrict__ x,
int n,
double * __restrict__ w_trapz,
ErrorMsg errmsg
) {
int i;
/* Case with just one point, w would normally be 0. */
if (n==1){
w_trapz[0] = 0.0;
}
else if (n>1){
//Set edgeweights:
w_trapz[0] = 0.5*(x[1]-x[0]);
w_trapz[n-1] = 0.5*(x[n-1]-x[n-2]);
//Set inner weights:
for (i=1; i<(n-1); i++){
w_trapz[i] = 0.5*(x[i+1]-x[i-1]);
}
}
return _SUCCESS_;
}
/**
* Compute quadrature weights for the trapezoidal integration method, when x is in decreasing order.
*
* @param x Input: Grid points on which f() is known.
* @param n Input: number of grid points.
* @param w_trapz Output: Weights of the trapezoidal method.
* @return the error status
*/
int array_trapezoidal_mweights(
double * __restrict__ x,
int n,
double * __restrict__ w_trapz,
ErrorMsg errmsg
) {
int i;
/* Case with just one point. */
if (n==1){
w_trapz[0] = 1.0;
}
else if (n>1){
//Set edgeweights:
w_trapz[0] = 0.5*(x[0]-x[1]);
w_trapz[n-1] = 0.5*(x[n-2]-x[n-1]);
//Set inner weights:
for (i=1; i<(n-1); i++){
w_trapz[i] = 0.5*(x[i-1]-x[i+1]);
}
}
return _SUCCESS_;
}
/**
* Compute integral of function using trapezoidal method.
*
* @param integrand Input: The function we are integrating.
* @param n Input: Compute integral on grid [0;n-1].
* @param w_trapz Input: Weights of the trapezoidal method.
* @param I Output: The integral.
* @return the error status
*/
int array_trapezoidal_integral(
double * __restrict__ integrand,
int n,
double * __restrict__ w_trapz,
double * __restrict__ I,
ErrorMsg errmsg
) {
int i;
double res=0.0;
for (i=0; i<n; i++){
res += integrand[i]*w_trapz[i];
}
*I = res;
return _SUCCESS_;
}
/**
* Compute convolution integral of product of two functions using trapezoidal method.
*
* @param integrand1 Input: Function 1.
* @param integrand2 Input: Function 2.
* @param n Input: Compute integral on grid [0;n-1].
* @param w_trapz Input: Weights of the trapezoidal method.
* @param I Output: The integral.
* @return the error status
*/
int array_trapezoidal_convolution(
double * __restrict__ integrand1,
double * __restrict__ integrand2,
int n,
double * __restrict__ w_trapz,
double * __restrict__ I,
ErrorMsg errmsg
) {
int i;
double res=0.0;
for (i=0; i<n; i++){
res += integrand1[i]*integrand2[i]*w_trapz[i];
}
*I = res;
return _SUCCESS_;
}
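/* Usage sketch (illustrative; `x`, `f`, `w` and `N` are hypothetical):
 * trapezoidal integration of samples f[i]=f(x[i]) on a grid in growing order:
 *
 *   double x[N], f[N], w[N], I; ErrorMsg err;
 *   array_trapezoidal_weights(x, N, w, err);
 *   array_trapezoidal_integral(f, N, w, &I, err);
 *   // I now approximates the integral of f over [x[0], x[N-1]]
 */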
|
dfft_host.c | #include <stdlib.h>
#include <string.h>
#include "dfft_host.h"
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
#include <math.h>
/*****************************************************************************
* Implementation of the distributed FFT
*****************************************************************************/
/*
* Redistribute from group-cyclic with cycle c0 to cycle c1>=c0
*/
void dfft_redistribute_block_to_cyclic_1d(
int *dim,
int *pdim,
int ndim,
int current_dim,
int c0,
int c1,
int* pidx,
int size_in,
int *embed,
cpx_t *work,
cpx_t *scratch,
int *dfft_nsend,
int *dfft_nrecv,
int *dfft_offset_send,
int *dfft_offset_recv,
MPI_Comm comm,
int *proc_map,
int row_m)
{
/* exit early if nothing needs to be done */
if (c0 == c1) return;
int length = dim[current_dim]/pdim[current_dim];
/* compute stride for column major matrix storage */
int stride = size_in/embed[current_dim];
/* processor index along current dimension */
int s = pidx[current_dim];
int ratio = c1/c0;
int size = ((length/ratio > 1) ? (length/ratio) : 1);
int npackets = length/size;
size *= stride;
int pdim_tot=1;
int k;
for (k = 0; k < ndim; ++k)
pdim_tot *= pdim[k];
int t;
for (t = 0; t<pdim_tot; ++t)
{
dfft_nsend[t] = 0;
dfft_nrecv[t] = 0;
dfft_offset_send[t] = 0;
dfft_offset_recv[t] = 0;
}
int j0;
int j2;
j0 = s % c0;
j2 = s / c0;
/* initialize send offsets and pack data */
int j;
#pragma omp parallel for private(j,k)
for (j = 0; j < npackets; ++j)
{
int offset = j*size;
int jglob = j2*c0*length + j * c0 + j0;
int desti = (jglob/(c1*length))*c1+ jglob%c1;
int destproc = 0;
if (row_m)
{
for (k = ndim-1; k >=0 ;--k)
{
destproc *= pdim[k];
destproc += ((current_dim == k) ? desti : pidx[k]);
}
}
else
{
for (k = 0; k < ndim; ++k)
{
destproc *= pdim[k];
destproc += ((current_dim == k) ? desti : pidx[k]);
}
}
int rank = proc_map[destproc];
dfft_nsend[rank] = size*sizeof(cpx_t);
dfft_offset_send[rank] = offset*sizeof(cpx_t);
int r;
for(r=0; r< (size/stride); r++)
for (k=0; k < stride; k++)
scratch[offset + r*stride+k]= work[(j+r*ratio)*stride+k];
}
/* initialize recv offsets */
int offset = 0;
j0 = s % c1;
j2 = s/c1;
int r;
for (r = 0; r < npackets; ++r)
{
offset = r*size;
j = r*size/stride;
int jglob = j2*c1*length+ j * c1 + j0;
int srci = (jglob/(c0*length))*c0+jglob%c0;
int srcproc = 0;
int k;
if (row_m)
{
for (k = ndim-1; k >= 0; --k)
{
srcproc *= pdim[k];
srcproc += ((current_dim == k) ? srci : pidx[k]);
}
}
else
{
for (k = 0; k < ndim; ++k)
{
srcproc *= pdim[k];
srcproc += ((current_dim == k) ? srci : pidx[k]);
}
}
int rank = proc_map[srcproc];
dfft_nrecv[rank] = size*sizeof(cpx_t);
dfft_offset_recv[rank] = offset*sizeof(cpx_t);
}
/* synchronize */
MPI_Barrier(comm);
/* communicate */
MPI_Alltoallv(scratch,dfft_nsend, dfft_offset_send, MPI_BYTE,
work, dfft_nrecv, dfft_offset_recv, MPI_BYTE,
comm);
}
/* Redistribute from group-cyclic with cycle c0 to cycle c1 <= c0
* rev=1 if local order is reversed
*
* if rev = 1 and np >= c0 (last stage) it really transforms
* into a hybrid-distribution, which after the last local ordered
* DFT becomes the cyclic distribution
*/
void dfft_redistribute_cyclic_to_block_1d(int *dim,
int *pdim,
int ndim,
int current_dim,
int c0,
int c1,
int* pidx,
int rev,
int size_in,
int *embed,
cpx_t *work,
cpx_t *scratch,
int *rho_L,
int *rho_pk0,
int *dfft_nsend,
int *dfft_nrecv,
int *dfft_offset_send,
int *dfft_offset_recv,
MPI_Comm comm,
int *proc_map,
int row_m)
{
if (c1 == c0) return;
/* length along current dimension */
int length = dim[current_dim]/pdim[current_dim];
int size = length*c1/c0;
size = (size ? size : 1);
int npackets = length/size;
int stride = size_in/embed[current_dim];
/* processor index along current dimension */
int s=pidx[current_dim];
/* number of procs along current dimension */
int p=pdim[current_dim];
size *= stride;
int offset = 0;
int recv_size,send_size;
int j0_local = s%c0;
int j2_local = s/c0;
int j0_new_local = s%c1;
int j2_new_local = s/c1;
int pdim_tot=1;
int k;
for (k = 0; k < ndim; ++k)
pdim_tot *= pdim[k];
int i;
for (i = 0; i < pdim_tot; ++i)
{
dfft_nsend[i] = 0;
dfft_nrecv[i] = 0;
dfft_offset_send[i] = 0;
dfft_offset_recv[i] = 0;
}
for (i = 0; i < p; ++i)
{
int j0_remote = i%c0;
int j2_remote = i/c0;
int j0_new_remote = i % c1;
int j2_new_remote = i/c1;
/* decision to send and/or receive */
int send = 0;
int recv = 0;
if (rev && (length >= c0))
{
/* redistribute into block with reversed processor id
and swapped-partially reversed local order (the c0 LSB
of the local index are MSB, and the n/p/c0 MSB
are LSB and are reversed */
send = (((j2_new_remote % (p/c0)) == (rho_pk0[j2_local])) ? 1 : 0);
recv = (((j2_new_local % (p/c0)) == (rho_pk0[j2_remote])) ? 1 : 0);
}
else
{
send = (((j2_new_remote / (c0/c1)) == j2_local) && ((j0_local % c1)==j0_new_remote) ? 1 : 0);
recv = (((j2_new_local / (c0/c1)) == j2_remote) && ((j0_remote % c1)==j0_new_local) ? 1 : 0);
if (length*c1 < c0)
{
send &= (j0_local/(length*c1) == j2_new_remote % (c0/(length*c1)));
recv &= (j0_remote/(length*c1) == j2_new_local % (c0/(length*c1)));
}
}
/* offset of first element sent */
int j1;
if (length*c1 >= c0)
{
j1 = (j2_new_remote % (c0/c1))*length*c1/c0;
}
else
{
j1 = (j2_new_remote / (c0/(length*c1))) % length;
}
if (rev)
{
if (length >= c0)
{
j1 = j2_new_remote/(p/c0);
}
else
j1 = rho_L[j1];
}
/* mirror remote decision to send */
send_size = (send ? size : 0);
recv_size = (recv ? size : 0);
int destproc = 0;
int k;
if (row_m)
{
for (k = ndim-1; k >=0 ;--k)
{
destproc *= pdim[k];
destproc += ((current_dim == k) ? i : pidx[k]);
}
}
else
{
for (k = 0; k < ndim; ++k)
{
destproc *= pdim[k];
destproc += ((current_dim == k) ? i : pidx[k]);
}
}
int rank = proc_map[destproc];
dfft_offset_send[rank] = (send ? (stride*j1*sizeof(cpx_t)) : 0);
if (rev && (length > c0/c1))
{
/* we are directly receiving into the work buf */
dfft_offset_recv[rank] = stride*j0_remote*length/c0*sizeof(cpx_t);
}
else
{
dfft_offset_recv[rank] = offset*sizeof(cpx_t);
}
dfft_nsend[rank] = send_size*sizeof(cpx_t);
dfft_nrecv[rank] = recv_size*sizeof(cpx_t);
offset += (recv ? size : 0);
}
/* we need to pack data if the local input buffer is reversed
and we are sending more than one element */
if (rev && (size > stride))
{
offset = 0;
/*#pragma omp ... */
int i;
for (i = 0; i <p; ++i)
{
int destproc = 0;
int k;
if (row_m)
{
for (k = ndim-1; k >=0 ;--k)
{
destproc *= pdim[k];
destproc += ((current_dim == k) ? i : pidx[k]);
}
}
else
{
for (k = 0; k < ndim; ++k)
{
destproc *= pdim[k];
destproc += ((current_dim == k) ? i : pidx[k]);
}
}
int rank = proc_map[destproc];
int j1_offset = dfft_offset_send[rank]/sizeof(cpx_t)/stride;
/* we are sending from a tmp buffer/stride */
dfft_offset_send[rank] = offset*sizeof(cpx_t)*stride;
int n = dfft_nsend[rank]/stride/sizeof(cpx_t);
int j;
for (j = 0; j < n; j++)
for (k = 0; k < stride; ++ k)
scratch[(offset+j)*stride+k] = work[(j1_offset+j*c0)*stride+k];
offset += n;
}
/* perform communication */
MPI_Barrier(comm);
MPI_Alltoallv(scratch,dfft_nsend, dfft_offset_send, MPI_BYTE,
work, dfft_nrecv, dfft_offset_recv, MPI_BYTE,
comm);
}
else
{
/* perform communication */
MPI_Barrier(comm);
MPI_Alltoallv(work,dfft_nsend, dfft_offset_send, MPI_BYTE,
scratch, dfft_nrecv, dfft_offset_recv, MPI_BYTE,
comm);
/* unpack */
int r;
#pragma omp parallel for private(r)
for (r = 0; r < npackets; ++r)
{
int j1, j1_offset, del;
int j0_remote = j0_new_local + r*c1;
if (rev && (length >= c0))
{
j1_offset = j0_remote*length/c0;
del = 1;
}
else
{
j1_offset = j0_remote/c1;
del = c0/c1;
}
int j;
for (j = 0; j < (size/stride); ++j)
{
j1 = j1_offset + j*del;
int k;
for (k = 0; k < stride; ++k)
work[j1*stride+k] = scratch[r*size+j*stride+k];
}
}
}
}
/* plan_long: complete local FFT
plan_short: partial local FFT
input and output are M-cyclic (M=pdim[current_dim])
(out-of-place version, overwrites input)
*/
void mpifft1d_dif(int *dim,
int *pdim,
int ndim,
int current_dim,
int* pidx,
int inverse,
int size,
int *embed,
cpx_t *in,
cpx_t *out,
plan_t plan_short,
plan_t plan_long,
int *rho_L,
int *rho_pk0,
int *rho_Lk0,
int *dfft_nsend,
int *dfft_nrecv,
int *dfft_offset_send,
int *dfft_offset_recv,
MPI_Comm comm,
int *proc_map,
int row_m)
{
int p = pdim[current_dim];
int length = dim[current_dim]/pdim[current_dim];
int st = size/embed[current_dim]*(dim[current_dim]/pdim[current_dim]);
/* compute stride for column major matrix storage */
int stride = size/embed[current_dim];
int c;
int k0 = length;
for (c = p; c >1; c /= length)
{
#if 1
/* do local out-of-place FFT (long-distance butterflies) */
#ifdef FFT1D_SUPPORTS_THREADS
dfft_local_1dfft(in, out, plan_long, inverse);
#else
int i;
#pragma omp parallel for
for (i = 0; i < st/length; ++i)
dfft_local_1dfft(in+i, out+i, plan_long, inverse);
#endif
/* apply twiddle factors */
double alpha = ((double)(pidx[current_dim] %c))/(double)c;
int j;
#pragma omp parallel for private(j)
for (j = 0; j < length; j++)
{
double theta = -(double)2.0 * (double)M_PI * alpha/(double) length;
cpx_t w;
RE(w) = cos((double)j*theta);
IM(w) = sin((double)j*theta);
double sign = ((inverse) ? (-1.0) : 1.0);
IM(w) *=sign;
int r;
for (r = 0; r < stride; ++r)
{
cpx_t x = out[j*stride+r];
cpx_t y;
RE(y) = RE(x) * RE(w) - IM(x) * IM(w);
IM(y) = RE(x) * IM(w) + IM(x) * RE(w);
in[j*stride+r] = y;
}
}
int rev = 1;
#else
int rev = 0;
#endif
/* in-place redistribute from group-cyclic c -> c1 */
int c1 = ((c > length) ? (c/length) : 1);
k0 = c;
dfft_redistribute_cyclic_to_block_1d(dim,pdim,ndim,current_dim, c, c1,
pidx, rev, size, embed, in,out,rho_L,rho_pk0,
dfft_nsend,dfft_nrecv,dfft_offset_send,dfft_offset_recv,
comm, proc_map, row_m);
}
/* perform remaining short-distance butterflies,
* out-of-place 1d FFT */
#ifdef FFT1D_SUPPORTS_THREADS
dfft_local_1dfft(in, out, plan_short,inverse);
#else
int i;
#pragma omp parallel for
for (i = 0; i < st/k0; ++i)
dfft_local_1dfft(in+i, out+i, plan_short, inverse);
#endif
}
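/* Note: each pass of the loop above performs the long-distance butterflies
 * of a decimation-in-frequency FFT locally, applies the twiddle factors,
 * and redistributes from group-cyclic cycle c to c/length so that the next
 * pass's butterflies are again local; the loop therefore runs roughly
 * log_length(p) times before the final short-distance FFT. */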
/* n-dimensional fft routine (in-place)
*/
void mpifftnd_dif(int *dim,
int *pdim,
int ndim,
int* pidx,
int inv,
int size_in,
int *inembed,
int *oembed,
cpx_t *work,
cpx_t *scratch,
plan_t *plans_short,
plan_t *plans_long,
int **rho_L,
int **rho_pk0,
int **rho_Lk0,
int *dfft_nsend,
int *dfft_nrecv,
int *dfft_offset_send,
int *dfft_offset_recv,
MPI_Comm comm,
int *proc_map,
int row_m)
{
int size = size_in;
int current_dim;
for (current_dim = 0; current_dim < ndim; ++current_dim)
{
/* assume input in local column major */
mpifft1d_dif(dim, pdim,ndim,current_dim,pidx, inv,
size, inembed, work, scratch, plans_short[current_dim],
plans_long[current_dim], rho_L[current_dim],
rho_pk0[current_dim],rho_Lk0[current_dim],
dfft_nsend,dfft_nrecv,dfft_offset_send,dfft_offset_recv,
comm,proc_map, row_m);
int l = dim[current_dim]/pdim[current_dim];
int stride = size/inembed[current_dim];
/* transpose local matrix */
int i;
#pragma omp parallel for private(i)
for (i = 0; i < l; ++i)
{
int j;
for (j = 0; j < stride; ++j)
{
int gidx = j+i*stride;
int new_idx = j*oembed[current_dim]+i;
work[new_idx] = scratch[gidx];
}
}
/* update size */
size *= oembed[current_dim];
size /= inembed[current_dim];
}
}
void redistribute_nd(int *dim,
int *pdim,
int ndim,
int* pidx,
int size,
int *embed,
cpx_t *work,
cpx_t *scratch,
int *dfft_nsend,
int *dfft_nrecv,
int *dfft_offset_send,
int *dfft_offset_recv,
int c2b,
MPI_Comm comm,
int *proc_map,
int row_m)
{
cpx_t *cur_work =work;
cpx_t *cur_scratch =scratch;
int current_dim;
for (current_dim = 0; current_dim < ndim; ++current_dim)
{
/* redistribute along one dimension (in-place) */
if (!c2b)
dfft_redistribute_block_to_cyclic_1d(dim, pdim, ndim, current_dim,
1, pdim[current_dim], pidx, size, embed,
cur_work, cur_scratch, dfft_nsend,dfft_nrecv,
dfft_offset_send, dfft_offset_recv, comm, proc_map, row_m);
else
dfft_redistribute_cyclic_to_block_1d(dim, pdim, ndim, current_dim,
pdim[current_dim], 1, pidx, 0, size, embed, cur_work,
cur_scratch, NULL, NULL, dfft_nsend,
dfft_nrecv, dfft_offset_send, dfft_offset_recv, comm, proc_map, row_m);
int l = dim[current_dim]/pdim[current_dim];
int stride = size/embed[current_dim];
/* transpose local matrix from column major to row major */
int i;
#pragma omp parallel for private(i)
for (i = 0; i < l; ++i)
{
int j;
for (j = 0; j < stride; ++j)
{
int gidx = j+i*stride;
int new_idx = j*embed[current_dim]+i;
cur_scratch[new_idx] =cur_work[gidx];
}
}
/* swap buffers */
cpx_t *tmp;
tmp = cur_scratch;
cur_scratch = cur_work;
cur_work = tmp;
}
if (ndim % 2)
{
memcpy(work, scratch, sizeof(cpx_t)*size);
}
}
/*****************************************************************************
* Distributed FFT interface
*****************************************************************************/
int dfft_execute(cpx_t *h_in, cpx_t *h_out, int dir, dfft_plan p)
{
/* only works on host plans */
if (p.device) return 2;
int out_of_place = (h_in == h_out) ? 0 : 1;
cpx_t *scratch, *work;
if (out_of_place)
{
work = p.scratch;
scratch = p.scratch_2;
memcpy(work, h_in, p.size_in*sizeof(cpx_t));
}
else
{
scratch = p.scratch;
/*! FIXME need to ensure in buf size >= scratch_size */
work = h_in;
}
if ((!dir && !p.input_cyclic) || (dir && !p.output_cyclic))
{
/* redistribution of input */
redistribute_nd(p.gdim, p.pdim, p.ndim, p.pidx,
p.size_in, p.inembed, work, scratch, p.nsend,p.nrecv,
p.offset_send,p.offset_recv, 0, p.comm, p.proc_map, p.row_m);
}
/* multi-dimensional FFT */
mpifftnd_dif(p.gdim, p.pdim, p.ndim, p.pidx, dir,
p.size_in,p.inembed,p.oembed, work, scratch,
dir ? p.plans_short_inverse : p.plans_short_forward,
dir ? p.plans_long_inverse : p.plans_long_forward,
p.rho_L, p.rho_pk0, p.rho_Lk0, p.nsend,p.nrecv,
p.offset_send,p.offset_recv, p.comm, p.proc_map, p.row_m);
if ((dir && !p.input_cyclic) || (!dir && !p.output_cyclic))
{
/* redistribution of output */
redistribute_nd(p.gdim, p.pdim, p.ndim, p.pidx,
p.size_out,p.oembed, work, scratch, p.nsend,p.nrecv,
p.offset_send,p.offset_recv, 1, p.comm, p.proc_map, p.row_m);
}
if (out_of_place)
{
memcpy(h_out, work, sizeof(cpx_t)*p.size_out);
}
return 0;
}
int dfft_create_plan(dfft_plan *p,
int ndim, int *gdim, int *inembed, int *oembed,
int *pdim, int *pidx, int row_m,
int input_cyclic, int output_cyclic,
MPI_Comm comm,
int *proc_map)
{
return dfft_create_plan_common(p, ndim, gdim, inembed,
oembed, pdim, pidx, row_m,
input_cyclic, output_cyclic, comm, proc_map, 0);
}
void dfft_destroy_plan(dfft_plan plan)
{
dfft_destroy_plan_common(plan, 0);
}
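/* Usage sketch (illustrative, not from the library's documentation; the
 * embed arrays, process grid and rank-to-grid mapping are assumptions):
 * a forward 2-d transform on a 2x2 process grid with cyclic input/output:
 *
 *   int gdim[2] = {64, 64}, pdim[2] = {2, 2};
 *   int pidx[2] = {rank / 2, rank % 2}; // this rank's grid coordinates
 *   int proc_map[4] = {0, 1, 2, 3};
 *   int embed[2] = {64, 64};            // assumed: no padding
 *   dfft_plan plan;
 *   dfft_create_plan(&plan, 2, gdim, embed, embed, pdim, pidx,
 *                    0, 1, 1, MPI_COMM_WORLD, proc_map);
 *   dfft_execute(in, out, 0, plan);     // dir==0: forward; nonzero: inverse
 *   dfft_destroy_plan(plan);
 */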
|
modifier_view.h | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2016, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: David Weese <david.weese@fu-berlin.de>
// Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>
// ==========================================================================
// TODO(holtgrew): Split into modified_string_mod_view.h and modified_iterator_mod_view.h.
// TODO(holtgrew): Move out convert()
#ifndef SEQAN_MODIFIER_MODIFIER_VIEW_H_
#define SEQAN_MODIFIER_MODIFIER_VIEW_H_
namespace seqan
{
// ==========================================================================
// Forwards
// ==========================================================================
// ==========================================================================
// Classes
// ==========================================================================
// --------------------------------------------------------------------------
// Class ModView
// --------------------------------------------------------------------------
/*!
* @class ModViewModifiedIterator
* @extends ModifiedIterator
* @headerfile <seqan/modifier.h>
*
* @brief Transforms the character of a host using a custom functor.
*
* @signature template <typename THost, typename TFunctor>
* class ModifiedIterator<THost, ModView<TFunctor> >;
*
* @tparam THost The host iterator.
* @tparam TFunctor A unary functor type.
*/
/*!
* @class ModViewModifiedString
* @extends ModifiedString
* @headerfile <seqan/modifier.h>
*
* @brief Transforms the character of a host using a custom functor.
*
* @signature template <typename THost, typename TFunctor>
* class ModifiedString<THost, ModView<TFunctor> >;
*
* @tparam THost The host iterator.
* @tparam TFunctor A unary functor type.
*/
template <typename TFunctor>
struct ModView {};
template <typename TFunctor>
struct ModViewCargo
{
TFunctor func;
ModViewCargo() : func()
{}
};
template <typename THost, typename TFunctor>
class ModifiedIterator<THost, ModView<TFunctor> >
{
public:
typedef typename Cargo<ModifiedIterator>::Type TCargo_;
THost _host;
TCargo_ _cargo;
mutable typename Value<ModifiedIterator>::Type tmp_value;
ModifiedIterator() : _host(), tmp_value()
{}
template <typename TOtherHost>
ModifiedIterator(ModifiedIterator<TOtherHost, ModView<TFunctor> > & origin) :
_host(origin._host), _cargo(origin._cargo), tmp_value()
{}
explicit
ModifiedIterator(THost const & host) :
_host(host), tmp_value()
{}
ModifiedIterator(THost const & host, TFunctor const & functor):
_host(host), tmp_value()
{
cargo(*this).func = functor;
}
};
// --------------------------------------------------------------------------
// Class ModifiedString
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
class ModifiedString<THost, ModView<TFunctor> >
{
public:
typedef typename Pointer_<THost>::Type THostPointer_;
typedef typename Cargo<ModifiedString>::Type TCargo_;
mutable THostPointer_ _host;
TCargo_ _cargo;
mutable typename Value<ModifiedString>::Type tmp_value;
// Default constructor.
ModifiedString() : _host(), tmp_value()
{}
// Construct with the actual host.
explicit
ModifiedString(typename Parameter_<THost>::Type host):
_host(_toPointer(host)), tmp_value()
{}
// Construct with the functor.
explicit
ModifiedString(TFunctor const & functor):
_host(), tmp_value()
{
cargo(*this).func = functor;
}
// Constructor for creating a ModifiedString with const host from a non-const host.
template <typename THost_>
explicit
ModifiedString(THost_ & host,
SEQAN_CTOR_ENABLE_IF(IsConstructible<THost, THost_>)) :
_host(_toPointer(host)), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
}
// Construct with the actual host; variant with functor.
ModifiedString(typename Parameter_<THost>::Type host, TFunctor const & functor) :
_host(_toPointer(host)), tmp_value()
{
cargo(*this).func = functor;
}
// Constructor for creating a ModifiedString with const host with a non-const host; variant with functor.
template <typename THost_>
explicit
ModifiedString(THost_ & host,
TFunctor const & functor,
SEQAN_CTOR_ENABLE_IF(IsConstructible<THost, THost_>)) :
_host(_toPointer(host)), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
cargo(*this).func = functor;
}
// Constructor for innermost type; hand down to _host which is a ModifiedString itself.
template <typename THost_>
explicit
ModifiedString(THost_ && host,
SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<
typename RemoveReference<THost>::Type,
typename RemoveReference<THost_>::Type >)) :
_host(std::forward<THost_>(host)), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
}
// Constructor for innermost type; hand down to _host which is a ModifiedString itself. Variant with functor.
template <typename THost_>
explicit
ModifiedString(THost_ && host,
TFunctor const & functor,
SEQAN_CTOR_ENABLE_IF(IsAnInnerHost<
typename RemoveReference<THost>::Type,
typename RemoveReference<THost_>::Type >)) :
_host(std::forward<THost_>(host)), tmp_value()
{
ignoreUnusedVariableWarning(dummy);
cargo(*this).func = functor;
}
template <typename TPos>
inline typename Reference<ModifiedString>::Type
operator[](TPos pos)
{
return value(*this, pos);
}
template <typename TPos>
inline typename Reference<ModifiedString const>::Type
operator[](TPos pos) const
{
return value(*this, pos);
}
};
// ==========================================================================
// Metafunctions
// ==========================================================================
// --------------------------------------------------------------------------
// Metafunction Cargo [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
struct Cargo<ModifiedIterator<THost, ModView<TFunctor> > >
{
typedef ModViewCargo<TFunctor> Type;
};
// --------------------------------------------------------------------------
// Metafunction Value [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
struct Value<ModifiedIterator<THost, ModView<TFunctor> > >
{
typedef typename TFunctor::result_type TResult_;
typedef typename RemoveConst_<TResult_>::Type Type;
};
template <typename THost, typename TFunctor>
struct Value<ModifiedIterator<THost, ModView<TFunctor> > const> :
Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
// --------------------------------------------------------------------------
// Metafunction GetValue [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
struct GetValue<ModifiedIterator<THost, ModView<TFunctor> > > :
Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
template <typename THost, typename TFunctor>
struct GetValue<ModifiedIterator<THost, ModView<TFunctor> > const> :
Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
// --------------------------------------------------------------------------
// Metafunction Reference [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
struct Reference<ModifiedIterator<THost, ModView<TFunctor> > > :
Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
template <typename THost, typename TFunctor>
struct Reference<ModifiedIterator<THost, ModView<TFunctor> > const> :
Value<ModifiedIterator<THost, ModView<TFunctor> > >
{};
// NOTE(h-2): ModView element access is always by copy never by reference
// This is a workaround for dangling references to the stack when
// combining infixes and modified views, more precisely:
// if you iterate over an infix of a modview then value() on the iterator
// will return reference to the tmp_value inside the moditerator
// which might have been destructed.
// This is a more general problem that stems from the fact that
// "virtual strings" of the same type (infixes, modstrings) can be
// automatically compacted into one layer, but combinations cannot.
// This workaround happens in ModView, because it is used less frequently
// than Infixes.
// --------------------------------------------------------------------------
// Metafunction Cargo [ModifiedString]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
struct Cargo< ModifiedString<THost, ModView<TFunctor> > >
{
typedef ModViewCargo<TFunctor> Type;
};
// ==========================================================================
// Functions
// ==========================================================================
// --------------------------------------------------------------------------
// Function getValue() [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > >::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
return cargo(me).func(getValue(host(me)));
}
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
return cargo(me).func(getValue(host(me)));
}
// --------------------------------------------------------------------------
// Function value() [ModifiedIterator]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > >::Type
value(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
return getValue(me);
}
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
value(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
return getValue(me);
}
// --------------------------------------------------------------------------
// Function getValue() [ModifiedString]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > >::Type
getValue(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
return cargo(me).func(getValue(host(me), pos));
}
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
return cargo(me).func(getValue(host(me), pos));
}
// --------------------------------------------------------------------------
// Function value() [ModifiedString]
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > >::Type
value(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
return getValue(me, pos);
}
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > const>::Type
value(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
return getValue(me, pos);
}
// --------------------------------------------------------------------------
// Function assignModViewFunctor()
// --------------------------------------------------------------------------
template <typename THost, typename TFunctor>
inline void
assignModViewFunctor(ModifiedString<THost, ModView<TFunctor> > & me, TFunctor const & functor)
{
cargo(me).func = functor;
}
// --------------------------------------------------------------------------
// Function convert()
// --------------------------------------------------------------------------
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
// OpenMP does not support for-loops over iterators, therefore use index variables.
typedef typename Position<TSequence>::Type TPos;
typedef typename MakeSigned_<TPos>::Type TSignedPos;
#pragma omp parallel for if(length(sequence) > 1000000)
for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
sequence[p] = F(sequence[p]);
#else
typedef typename Iterator<TSequence, Standard>::Type TIter;
TIter it = begin(sequence, Standard());
TIter itEnd = end(sequence, Standard());
for(; it != itEnd; ++it)
*it = F(*it);
#endif
}
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence const & sequence, TFunctor const &F)
{
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
// OpenMP does not support for-loops over iterators, therefore use index variables.
typedef typename Position<TSequence>::Type TPos;
typedef typename MakeSigned_<TPos>::Type TSignedPos;
#pragma omp parallel for if(length(sequence) > 1000000)
for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
sequence[p] = F(sequence[p]);
#else
typedef typename Iterator<TSequence const, Standard>::Type TIter;
TIter it = begin(sequence, Standard());
TIter itEnd = end(sequence, Standard());
for(; it != itEnd; ++it)
*it = F(*it);
#endif
}
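// --------------------------------------------------------------------------
// Editorial usage sketch (not part of the original header)
// --------------------------------------------------------------------------
// convert() applies the functor to every value of the sequence in place.
// A minimal example, assuming SeqAn's FunctorUpcase from modifier_functors.h:
//
// seqan::String<char> s = "acgt";
// seqan::convert(s, seqan::FunctorUpcase<char>()); // s now holds "ACGT"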
} // namespace seqan
#endif // SEQAN_MODIFIER_MODIFIER_VIEW_H_
|
bias_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: chh@openailab.com
*/
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include <math.h>
int ref_bias_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, struct ir_tensor* bias_tensor,
int num_thread)
{
int channels = input_tensor->dims[1];
int h = input_tensor->dims[2];
int w = input_tensor->dims[3];
int size = h * w;
float* in_data = input_tensor->data;
float* bias = bias_tensor->data;
float* out_data = output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
for (int c = 0; c < channels; c++)
{
float* out_ptr = out_data + c * size;
float* in_ptr = in_data + c * size;
for (int i = 0; i < size; i++)
{
out_ptr[i] = in_ptr[i] + bias[c];
}
}
return 0;
}
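/* Editorial note: ref_bias_fp32 above reads the tensor as NCHW, i.e.
dims[1] = channels, dims[2] = height, dims[3] = width, and broadcasts one
bias value per channel across its h*w plane. This layout is inferred from
the indexing, not stated explicitly by the original author. */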
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
struct ir_graph* ir_graph = ir_node->graph;
struct ir_tensor* input_tensor;
struct ir_tensor* bias_tensor;
struct ir_tensor* output_tensor;
int layout = ir_graph->graph_layout;
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
// inplace inference
// if(input_tensor->data != output_tensor->data)
// {
// TLOG_ERR("input and output are not the same mem\n");
// set_tengine_errno(EFAULT);
// return -1;
// }
int ret = ref_bias_fp32(input_tensor, output_tensor, bias_tensor, exec_graph->num_thread);
if (ret != 0)
return -1;
return 0;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
return OPS_SCORE_CANDO;
}
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
static int reg_bias_hcl_ops(void* arg)
{
return register_builtin_node_ops(OP_BIAS, &hcl_node_ops);
}
static int unreg_bias_hcl_ops(void* arg)
{
return unregister_builtin_node_ops(OP_BIAS, &hcl_node_ops);
}
AUTO_REGISTER_OPS(reg_bias_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_bias_hcl_ops);
|
GB_unop__identity_int64_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_uint16)
// op(A') function: GB (_unop_tran__identity_int64_uint16)
// C type: int64_t
// A type: uint16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int64_uint16)
(
int64_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int64_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pi_spmd_final.c | /*
NAME: PI SPMD final version without false sharing
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.
The program was parallelized using OpenMP and an SPMD
algorithm. The following OpenMP specific lines were
added:
(1) A line to include omp.h -- the include file that
contains OpenMP's function prototypes and constants.
(2) A pragma that tells OpenMP to create a team of threads
with an integer variable i being created for each thread.
(3) two function calls: one to get the thread ID (ranging
from 0 to one less than the number of threads), and the other
returning the total number of threads.
(4) A "single" construct so only one thread prints the number
of threads.
(5) A cyclic distribution of the loop by changing the loop control
expressions to run from the thread ID, incremented by the number
of threads. Local sums are accumulated into a private scalar.
(6) A barrier to make sure everyone's done.
(7) A single construct so only one thread combines the local
sums into a single global sum.
Note that this program avoids the false sharing problem
by storing partial sums into a private scalar.
History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
#include <omp.h>
#define MAX_THREADS 4
static long num_steps = 100000000;
double step;
int main ()
{
int i,j;
double pi, full_sum = 0.0;
double start_time, run_time;
double sum[MAX_THREADS]; // unused in this final version; leftover from the array-based variant that exhibited false sharing
step = 1.0/(double) num_steps;
for(j = 1; j <= MAX_THREADS; j++)
{
omp_set_num_threads(j);
full_sum = 0.0;
start_time = omp_get_wtime();
#pragma omp parallel private(i) // i is private to each thread
{
int id = omp_get_thread_num();
int numthreads = omp_get_num_threads();
double x;
double partial_sum = 0;
#pragma omp single // has an implicit barrier at the end by default
printf(" num_threads = %d",numthreads);
for (i = id; i < num_steps; i += numthreads){
x = (i+0.5)*step;
partial_sum += 4.0/(1.0+x*x);
}
#pragma omp critical // mutual exclusion: only one thread at a time enters, so the updates to the global sum for pi do not race
full_sum += partial_sum;
}
pi = step * full_sum;
run_time = omp_get_wtime() - start_time;
printf("\n pi is %f in %f seconds with %d threads \n ",pi,run_time,j);
}
return 0;
}
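/* Editorial sketch (not part of the original lesson): the SPMD pattern above,
with its explicit thread-ID arithmetic and critical section, can be collapsed
into a worksharing loop with a reduction clause, assuming i and x are declared
at function scope:
#pragma omp parallel for private(x) reduction(+:full_sum)
for (i = 0; i < num_steps; i++) {
x = (i + 0.5) * step;
full_sum += 4.0 / (1.0 + x * x);
}
*/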
|
mapper_utilities.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Philipp Bucher, Jordi Cotela
//
// See Master-Thesis P.Bucher
// "Development and Implementation of a Parallel
// Framework for Non-Matching Grid Mapping"
#if !defined(KRATOS_MAPPER_UTILITIES_H_INCLUDED)
#define KRATOS_MAPPER_UTILITIES_H_INCLUDED
// System includes
// External includes
// Project includes
#include "includes/model_part.h"
#include "custom_utilities/mapper_flags.h"
#include "custom_utilities/mapper_local_system.h"
namespace Kratos
{
namespace MapperUtilities
{
typedef std::size_t SizeType;
typedef std::size_t IndexType;
typedef Node<3> NodeType;
typedef Kratos::unique_ptr<MapperInterfaceInfo> MapperInterfaceInfoUniquePointerType;
typedef Kratos::shared_ptr<MapperInterfaceInfo> MapperInterfaceInfoPointerType;
typedef std::vector<std::vector<MapperInterfaceInfoPointerType>> MapperInterfaceInfoPointerVectorType;
typedef Kratos::unique_ptr<MapperLocalSystem> MapperLocalSystemPointer;
typedef std::vector<MapperLocalSystemPointer> MapperLocalSystemPointerVector;
typedef Kratos::shared_ptr<MapperLocalSystemPointerVector> MapperLocalSystemPointerVectorPointer;
template< class TVarType >
static void FillFunction(const NodeType& rNode,
const TVarType& rVariable,
double& rValue)
{
rValue = rNode.FastGetSolutionStepValue(rVariable);
}
template< class TVarType >
static void FillFunctionNonHist(const NodeType& rNode,
const TVarType& rVariable,
double& rValue)
{
rValue = rNode.GetValue(rVariable);
}
template< class TVarType >
static std::function<void(const NodeType&, const TVarType&, double&)>
GetFillFunction(const Kratos::Flags& rMappingOptions)
{
if (rMappingOptions.Is(MapperFlags::FROM_NON_HISTORICAL))
return &FillFunctionNonHist<TVarType>;
return &FillFunction<TVarType>;
}
template< class TVarType >
static void UpdateFunction(NodeType& rNode,
const TVarType& rVariable,
const double Value,
const double Factor)
{
rNode.FastGetSolutionStepValue(rVariable) = Value * Factor;
}
template< class TVarType >
static void UpdateFunctionWithAdd(NodeType& rNode,
const TVarType& rVariable,
const double Value,
const double Factor)
{
rNode.FastGetSolutionStepValue(rVariable) += Value * Factor;
}
template< class TVarType >
static void UpdateFunctionNonHist(NodeType& rNode,
const TVarType& rVariable,
const double Value,
const double Factor)
{
rNode.GetValue(rVariable) = Value * Factor;
}
template< class TVarType >
static void UpdateFunctionNonHistWithAdd(NodeType& rNode,
const TVarType& rVariable,
const double Value,
const double Factor)
{
rNode.GetValue(rVariable) += Value * Factor;
}
template< class TVarType >
static std::function<void(NodeType&, const TVarType&, const double, const double)>
GetUpdateFunction(const Kratos::Flags& rMappingOptions)
{
if (rMappingOptions.Is(MapperFlags::ADD_VALUES) && rMappingOptions.Is(MapperFlags::TO_NON_HISTORICAL))
return &UpdateFunctionNonHistWithAdd<TVarType>;
if (rMappingOptions.Is(MapperFlags::ADD_VALUES))
return &UpdateFunctionWithAdd<TVarType>;
if (rMappingOptions.Is(MapperFlags::TO_NON_HISTORICAL))
return &UpdateFunctionNonHist<TVarType>;
return &UpdateFunction<TVarType>;
}
template< class TVectorType, class TVarType >
void UpdateSystemVectorFromModelPart(TVectorType& rVector,
ModelPart& rModelPart,
const TVarType& rVariable,
const Kratos::Flags& rMappingOptions)
{
// Here we construct a function pointer to not have the if all the time inside the loop
const auto fill_fct = MapperUtilities::GetFillFunction<TVarType>(rMappingOptions);
const int num_local_nodes = rModelPart.GetCommunicator().LocalMesh().NumberOfNodes();
const auto nodes_begin = rModelPart.GetCommunicator().LocalMesh().NodesBegin();
#pragma omp parallel for
for (int i=0; i<num_local_nodes; i++) {
fill_fct(*(nodes_begin + i), rVariable, rVector[i]);
}
}
template< class TVectorType, class TVarType >
void UpdateModelPartFromSystemVector(const TVectorType& rVector,
ModelPart& rModelPart,
const TVarType& rVariable,
const Kratos::Flags& rMappingOptions)
{
const double factor = rMappingOptions.Is(MapperFlags::SWAP_SIGN) ? -1.0 : 1.0;
// Here we construct a function pointer to not have the if all the time inside the loop
const auto update_fct = std::bind(MapperUtilities::GetUpdateFunction<TVarType>(rMappingOptions),
std::placeholders::_1,
std::placeholders::_2,
std::placeholders::_3,
factor);
const int num_local_nodes = rModelPart.GetCommunicator().LocalMesh().NumberOfNodes();
const auto nodes_begin = rModelPart.GetCommunicator().LocalMesh().NodesBegin();
#pragma omp parallel for
for (int i=0; i<num_local_nodes; i++) {
update_fct(*(nodes_begin + i), rVariable, rVector[i]);
}
}
/**
* @brief Assigning INTERFACE_EQUATION_IDs to the nodes, with and without MPI
* This function assigns the INTERFACE_EQUATION_IDs to the nodes, which
* act as EquationIds for the MappingMatrix. This works with and without MPI;
* in MPI a ScanSum is performed over the local numbers of nodes
* @param rModelPartCommunicator The Modelpart-Communicator to be used
* @author Philipp Bucher
*/
void AssignInterfaceEquationIds(Communicator& rModelPartCommunicator);
template<class TMapperLocalSystem>
void CreateMapperLocalSystemsFromNodes(const Communicator& rModelPartCommunicator,
std::vector<Kratos::unique_ptr<MapperLocalSystem>>& rLocalSystems)
{
const std::size_t num_nodes = rModelPartCommunicator.LocalMesh().NumberOfNodes();
const auto nodes_ptr_begin = rModelPartCommunicator.LocalMesh().Nodes().ptr_begin();
if (rLocalSystems.size() != num_nodes) {
rLocalSystems.resize(num_nodes);
}
#pragma omp parallel for
for (int i = 0; i< static_cast<int>(num_nodes); ++i) {
auto it_node = nodes_ptr_begin + i;
rLocalSystems[i] = Kratos::make_unique<TMapperLocalSystem>((*it_node).get());
}
int num_local_systems = rLocalSystems.size(); // int because the MPI SumAll expects an int
rModelPartCommunicator.SumAll(num_local_systems);
KRATOS_ERROR_IF_NOT(num_local_systems > 0)
<< "No mapper local systems were created" << std::endl;
}
inline int ComputeNumberOfNodes(ModelPart& rModelPart)
{
int num_nodes = rModelPart.GetCommunicator().LocalMesh().NumberOfNodes();
rModelPart.GetCommunicator().SumAll(num_nodes); // Compute the sum among the partitions
return num_nodes;
}
inline int ComputeNumberOfConditions(ModelPart& rModelPart)
{
int num_conditions = rModelPart.GetCommunicator().LocalMesh().NumberOfConditions();
rModelPart.GetCommunicator().SumAll(num_conditions); // Compute the sum among the partitions
return num_conditions;
}
inline int ComputeNumberOfElements(ModelPart& rModelPart)
{
int num_elements = rModelPart.GetCommunicator().LocalMesh().NumberOfElements();
rModelPart.GetCommunicator().SumAll(num_elements); // Compute the sum among the partitions
return num_elements;
}
inline double ComputeDistance(const array_1d<double, 3>& rCoords1,
const array_1d<double, 3>& rCoords2)
{
return std::sqrt( std::pow(rCoords1[0] - rCoords2[0] , 2) +
std::pow(rCoords1[1] - rCoords2[1] , 2) +
std::pow(rCoords1[2] - rCoords2[2] , 2) );
}
template <typename T>
inline double ComputeMaxEdgeLengthLocal(const T& rEntityContainer)
{
double max_element_size = 0.0;
// Loop through each edge of a geometrical entity ONCE
for (const auto& r_entity : rEntityContainer) {
for (std::size_t i = 0; i < (r_entity.GetGeometry().size() - 1); ++i) {
for (std::size_t j = i + 1; j < r_entity.GetGeometry().size(); ++j) {
double edge_length = ComputeDistance(r_entity.GetGeometry()[i].Coordinates(),
r_entity.GetGeometry()[j].Coordinates());
max_element_size = std::max(max_element_size, edge_length);
}
}
}
return max_element_size;
}
inline double ComputeMaxEdgeLengthLocal(const ModelPart::NodesContainerType& rNodes)
{
double max_element_size = 0.0;
// TODO: modify the loop so that each pair of nodes is visited only once (see the sketch after this function)
for (const auto& r_node_1 : rNodes) {
for (const auto& r_node_2 : rNodes) {
double edge_length = ComputeDistance(r_node_1.Coordinates(),
r_node_2.Coordinates());
max_element_size = std::max(max_element_size, edge_length);
}
}
return max_element_size;
}
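// Editorial sketch for the TODO above: visit each unordered node pair once,
// roughly halving the work. It assumes the container iterators are random
// access (an assumption of this sketch) and is illustrative only:
//
// const auto it_begin = rNodes.begin();
// const std::size_t n = rNodes.size();
// for (std::size_t i = 0; i + 1 < n; ++i) {
//     for (std::size_t j = i + 1; j < n; ++j) {
//         max_element_size = std::max(max_element_size,
//             ComputeDistance((it_begin + i)->Coordinates(),
//                             (it_begin + j)->Coordinates()));
//     }
// }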
double ComputeSearchRadius(ModelPart& rModelPart, int EchoLevel);
inline double ComputeSearchRadius(ModelPart& rModelPart1, ModelPart& rModelPart2, const int EchoLevel)
{
double search_radius = std::max(ComputeSearchRadius(rModelPart1, EchoLevel),
ComputeSearchRadius(rModelPart2, EchoLevel));
KRATOS_INFO_IF("Mapper", EchoLevel > 0) << "Computed search-radius: "
<< search_radius << std::endl;
return search_radius;
}
void CheckInterfaceModelParts(const int CommRank);
std::vector<double> ComputeLocalBoundingBox(ModelPart& rModelPart);
void ComputeBoundingBoxesWithTolerance(const std::vector<double>& rBoundingBoxes,
const double Tolerance,
std::vector<double>& rBoundingBoxesWithTolerance);
std::string BoundingBoxStringStream(const std::vector<double>& rBoundingBox);
bool PointIsInsideBoundingBox(const std::vector<double>& rBoundingBox,
const array_1d<double, 3>& rCoords);
void FillBufferBeforeLocalSearch(const MapperLocalSystemPointerVector& rMapperLocalSystems,
const std::vector<double>& rBoundingBoxes,
const SizeType BufferSizeEstimate,
std::vector<std::vector<double>>& rSendBuffer,
std::vector<int>& rSendSizes);
void CreateMapperInterfaceInfosFromBuffer(const std::vector<std::vector<double>>& rRecvBuffer,
const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo,
const int CommRank,
MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer);
void FillBufferAfterLocalSearch(MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer,
const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo,
const int CommRank,
std::vector<std::vector<char>>& rSendBuffer,
std::vector<int>& rSendSizes);
void AssignInterfaceInfosAfterRemoteSearch(const MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer,
MapperLocalSystemPointerVectorPointer& rpMapperLocalSystems);
void DeserializeMapperInterfaceInfosFromBuffer(
const std::vector<std::vector<char>>& rSendBuffer,
const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo,
const int CommRank,
MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer);
/**
* @class MapperInterfaceInfoSerializer
* @ingroup MappingApplication
* @brief Helper class to serialize/deserialize a vector containing MapperInterfaceInfos
* @details This class serializes the vector containing the MapperInterfaceInfos (Shared Ptrs)
* The goal of this class is to have a more efficient/faster implementation than the
* one of the Serializer by avoiding the casting that is done in the serializer when pointers
* are serialized
* @TODO test the performance against the Serializer
* @author Philipp Bucher
*/
class MapperInterfaceInfoSerializer
{
public:
MapperInterfaceInfoSerializer(std::vector<MapperInterfaceInfoPointerType>& rMapperInterfaceInfosContainer,
const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo)
: mrInterfaceInfos(rMapperInterfaceInfosContainer)
, mrpRefInterfaceInfo(rpRefInterfaceInfo->Create())
{ }
private:
std::vector<MapperInterfaceInfoPointerType>& mrInterfaceInfos;
MapperInterfaceInfoPointerType mrpRefInterfaceInfo;
friend class Kratos::Serializer; // Adding "Kratos::" is needed because of the "MapperUtilities" namespace
virtual void save(Kratos::Serializer& rSerializer) const;
virtual void load(Kratos::Serializer& rSerializer);
};
} // namespace MapperUtilities.
} // namespace Kratos.
#endif // KRATOS_MAPPER_UTILITIES_H_INCLUDED defined
|
GB_unop__identity_int16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_uint16)
// op(A') function: GB (_unop_tran__identity_int16_uint16)
// C type: int16_t
// A type: uint16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int16_uint16)
(
int16_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int16_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
collatzconjecture.c | #include <omp.h>
#include <stdlib.h>
#include <stdio.h>
int main (int argc, char* argv[]) {
if (argc < 2) { // guard: Nmax must be supplied on the command line
fprintf(stderr, "usage: %s Nmax\n", argv[0]);
return 1;
}
long long Nmax = atoll(argv[1]);
long long Imax = Nmax;
long long n;
long long i, j;
long long high = 0;
double startTime, endTime;
startTime = omp_get_wtime();
#pragma omp parallel for schedule(static, 500) private (n, i) reduction(max:high)
for (j = 1; j < Nmax; j++) {
n=j;
for (i = 1; i < Imax; i++) {
if (n % 2 == 0) {
n = n/2;
} else {
n = 3*n + 1;
}
if (n > high) high = n;
if (n==1) break;
}
}
endTime = omp_get_wtime();
printf("High: %lld\n", high);
printf("Runtime: %.16f\n", endTime - startTime);
return 0;
}
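/* Editorial note: schedule(static, 500) deals out fixed 500-iteration chunks,
but Collatz stopping times vary widely, so chunks are unevenly expensive;
schedule(dynamic, 500) or schedule(guided) may balance the threads better.
A tuning suggestion only, not a correctness fix. */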
|
GB_binop__times_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__times_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__times_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__times_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fp64)
// A*D function (colscale): GB (_AxD__times_fp64)
// D*A function (rowscale): GB (_DxB__times_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__times_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__times_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fp64)
// C=scalar+B GB (_bind1st__times_fp64)
// C=scalar+B' GB (_bind1st_tran__times_fp64)
// C=A+scalar GB (_bind2nd__times_fp64)
// C=A'+scalar GB (_bind2nd_tran__times_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_FP64 || GxB_NO_TIMES_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__times_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__times_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__times_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__times_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__times_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__times_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__times_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__times_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__times_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = (x * bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij * y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
GrB_Info GB (_bind1st_tran__times_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
GrB_Info GB (_bind2nd_tran__times_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mandel_wrong.c | /*
*
* An incorrect parallel implementation of computing the area of the Mandelbrot set.
* Can you figure out how to fix it?
*
* Author: Matt Cufari
* Version: 1.2.0
* Date Created Jan 4 2021
* Date Last Modified Jan 4 2021
*
*/
#include <omp.h>
#include <stdio.h>
#define NPOINTS 1000
#define MXITR 10000
struct d_complex{
double r; double i;
}; //Complex data type
void testpoint(struct d_complex); //Declaration of function
struct d_complex c; //Point C in complex plane
int numoutside = 0; //Number of points outside of set
//This is where the comments end. Good luck!
int main(){
int i,j;
double area, error, eps = 1.0e-7;
#pragma omp parallel for default(shared) private(eps, c)
for(i = 0; i < NPOINTS; i++){
for(j = 0; j<NPOINTS; j++){
c.r = -2.0 + 2.5*(double)(i)/((double)(NPOINTS)+eps);
c.i = 1.125 * (double)(j)/((double)(NPOINTS)+eps);
testpoint(c);
}
}
printf("NUMOUTSIDE: %d\n", numoutside);
area = 2.0*2.5*1.125 * (double)(NPOINTS*NPOINTS-numoutside)/(double)(NPOINTS*NPOINTS);
error = area/(double)NPOINTS;
printf("Area is: %f\n", area);
printf("error is: %f\n", error);
}
void testpoint(struct d_complex c){
struct d_complex z;
int iter;
double temp;
z = c;
for(iter = 0; iter<MXITR; iter++){
temp = (z.r*z.r)-(z.i*z.i)+c.r;
z.i = z.r*z.i*2+c.i;
z.r = temp;
if((z.r*z.r + z.i*z.i) > 4.0){
numoutside++;
break;
}
}
}
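/* Editorial sketch -- one possible fix, hedged (the file is deliberately
broken and this is not the author's official answer):
1. j is shared under the "parallel for" on i, so the inner loop races:
add private(j).
2. private(eps) leaves eps uninitialized inside the region: use
firstprivate(eps) instead.
3. numoutside++ in testpoint() is a data race: guard it, e.g.
#pragma omp atomic
numoutside++;
A corrected pragma could then read:
#pragma omp parallel for default(shared) firstprivate(eps) private(c, j)
*/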
|
LAGraph_BF_full.c | //------------------------------------------------------------------------------
// LAGraph_BF_full.c: Bellman-Ford single-source shortest paths, returns tree
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_BF_full: Bellman-Ford single source shortest paths, returning both
// the path lengths and the shortest-path tree. contributed by Jinhao Chen and
// Tim Davis, Texas A&M.
// LAGraph_BF_full performs Bellman-Ford to find the shortest paths, the
// parent nodes along the paths, and the hops (number of edges) in the paths
// from a given source vertex s in the range [0, n) on a graph given as an
// n*n matrix A. The sparse matrix A has entry A(i, j) = w if there is an
// edge from vertex i to vertex j with weight w. Furthermore, LAGraph_BF_full
// requires A(i, i) = 0 for all 0 <= i < n.
// TODO: think about the return values
// LAGraph_BF_full returns GrB_SUCCESS regardless of existence of negative-
// weight cycle. However, the GrB_Vector d(k), pi(k) and h(k) (i.e.,
// *pd_output, *ppi_output and *ph_output respectively) will be NULL when
// a negative-weight cycle is detected. Otherwise, the vector d has d(k) as the
// shortest distance from s to k. pi(k) = p+1, where p is the parent node of
// k-th node in the shortest path. In particular, pi(s) = 0. h(k) = hop(s, k),
// the number of edges from s to k in the shortest path.
//------------------------------------------------------------------------------
#include "LAGraph_internal.h"
#define LAGRAPH_FREE_ALL \
{ \
GrB_free(&d); \
GrB_free(&dtmp); \
GrB_free(&Atmp); \
GrB_free(&BF_Tuple3); \
GrB_free(&BF_lMIN_Tuple3); \
GrB_free(&BF_PLUSrhs_Tuple3); \
GrB_free(&BF_EQ_Tuple3); \
GrB_free(&BF_lMIN_Tuple3_Monoid); \
GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \
LAGRAPH_FREE (I); \
LAGRAPH_FREE (J); \
LAGRAPH_FREE (w); \
LAGRAPH_FREE (W); \
LAGRAPH_FREE (h); \
LAGRAPH_FREE (pi); \
}
//------------------------------------------------------------------------------
// data type for each entry of the adjacent matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
typedef struct
{
double w; // w corresponds to a path weight.
GrB_Index h; // h corresponds to a path size or number of hops.
GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
// vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
// for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF_Tuple3_struct;
//------------------------------------------------------------------------------
// 2 binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------
void BF_lMIN
(
BF_Tuple3_struct *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
if (x->w < y->w
|| (x->w == y->w && x->h < y->h)
|| (x->w == y->w && x->h == y->h && x->pi < y->pi))
{
if (z != x) { *z = *x; }
}
else
{
*z = *y;
}
}
void BF_PLUSrhs
(
BF_Tuple3_struct *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
z->w = x->w + y->w;
z->h = x->h + y->h;
if (x->pi != UINT64_MAX && y->pi != 0)
{
z->pi = y->pi;
}
else
{
z->pi = x->pi;
}
}
void BF_EQ
(
bool *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
if (x->w == y->w && x->h == y->h && x->pi == y->pi)
{
*z = true;
}
else
{
*z = false;
}
}
// Given an n-by-n adjacency matrix A and a source vertex s:
// if there is no negative-weight cycle reachable from s, return the distances
// of shortest paths from s and the parents along the paths as vector d;
// otherwise, return d=NULL because there is a negative-weight cycle.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the
// sum of edges length in the shortest path
// ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the
// parent of i-th vertex in the shortest path
// ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the
// number of edges from s to i in the shortest path
// A has zeros on diagonal and weights on corresponding entries of edges
// s is given index for source vertex
GrB_Info LAGraph_BF_full
(
GrB_Vector *pd_output, //the pointer to the vector of distance
GrB_Vector *ppi_output, //the pointer to the vector of parent
GrB_Vector *ph_output, //the pointer to the vector of hops
const GrB_Matrix A, //matrix for the graph
const GrB_Index s //given index of the source
)
{
GrB_Info info;
// tmp vector to store distance vector after n (i.e., V) loops
GrB_Vector d = NULL, dtmp = NULL;
GrB_Matrix Atmp = NULL;
GrB_Type BF_Tuple3;
GrB_BinaryOp BF_lMIN_Tuple3;
GrB_BinaryOp BF_PLUSrhs_Tuple3;
GrB_BinaryOp BF_EQ_Tuple3;
GrB_Monoid BF_lMIN_Tuple3_Monoid;
GrB_Semiring BF_lMIN_PLUSrhs_Tuple3;
GrB_Index nrows, ncols, n, nz; // n = # of row/col, nz = # of nnz in graph
GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A
GrB_Index *h = NULL, *pi = NULL;
double *w = NULL;
BF_Tuple3_struct *W = NULL;
if (A == NULL || pd_output == NULL ||
ppi_output == NULL || ph_output == NULL)
{
// required argument is missing
LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
}
*pd_output = NULL;
*ppi_output = NULL;
*ph_output = NULL;
LAGRAPH_OK (GrB_Matrix_nrows (&nrows, A)) ;
LAGRAPH_OK (GrB_Matrix_ncols (&ncols, A)) ;
LAGRAPH_OK (GrB_Matrix_nvals (&nz, A));
if (nrows != ncols)
{
// A must be square
LAGRAPH_ERROR ("A must be square", GrB_INVALID_VALUE) ;
}
n = nrows;
if (s >= n) // GrB_Index is unsigned, so s < 0 can never hold
{
LAGRAPH_ERROR ("invalid value for source vertex s", GrB_INVALID_VALUE);
}
//--------------------------------------------------------------------------
// create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring
//--------------------------------------------------------------------------
// GrB_Type
LAGRAPH_OK (GrB_Type_new(&BF_Tuple3, sizeof(BF_Tuple3_struct)));
// GrB_BinaryOp
LAGRAPH_OK (GrB_BinaryOp_new(&BF_EQ_Tuple3,
(LAGraph_binary_function) (&BF_EQ), GrB_BOOL, BF_Tuple3, BF_Tuple3));
LAGRAPH_OK (GrB_BinaryOp_new(&BF_lMIN_Tuple3,
(LAGraph_binary_function) (&BF_lMIN), BF_Tuple3, BF_Tuple3, BF_Tuple3));
LAGRAPH_OK (GrB_BinaryOp_new(&BF_PLUSrhs_Tuple3,
(LAGraph_binary_function)(&BF_PLUSrhs),
BF_Tuple3, BF_Tuple3, BF_Tuple3));
// GrB_Monoid
BF_Tuple3_struct BF_identity = (BF_Tuple3_struct) { .w = INFINITY,
.h = UINT64_MAX, .pi = UINT64_MAX };
LAGRAPH_OK (GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3,
&BF_identity));
//GrB_Semiring
LAGRAPH_OK (GrB_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3,
BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3));
//--------------------------------------------------------------------------
// allocate arrays used for tuplets
//--------------------------------------------------------------------------
I = LAGraph_malloc (nz, sizeof(GrB_Index)) ;
J = LAGraph_malloc (nz, sizeof(GrB_Index)) ;
w = LAGraph_malloc (nz, sizeof(double)) ;
W = LAGraph_malloc (nz, sizeof(BF_Tuple3_struct)) ;
if (I == NULL || J == NULL || w == NULL || W == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// create matrix Atmp based on A, while its entries become BF_Tuple3 type
//--------------------------------------------------------------------------
LAGRAPH_OK (GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A));
int nthreads = LAGraph_get_nthreads ( ) ;
printf ("nthreads %d\n", nthreads) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (GrB_Index k = 0; k < nz; k++)
{
if (w[k] == 0) //diagonal entries
{
W[k] = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
}
else
{
W[k] = (BF_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 };
}
}
LAGRAPH_OK (GrB_Matrix_new(&Atmp, BF_Tuple3, n, n));
LAGRAPH_OK (GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3));
//--------------------------------------------------------------------------
// create and initialize "distance" vector d
//--------------------------------------------------------------------------
LAGRAPH_OK (GrB_Vector_new(&d, BF_Tuple3, n));
// initial distance from s to itself
BF_Tuple3_struct d0 = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
LAGRAPH_OK (GrB_Vector_setElement_UDT(d, &d0, s));
//--------------------------------------------------------------------------
// start the Bellman Ford process
//--------------------------------------------------------------------------
// copy d to dtmp in order to create a same size of vector
LAGRAPH_OK (GrB_Vector_dup(&dtmp, d));
bool same = false; // indicates whether d == dtmp
int64_t iter = 0; // number of iterations
// terminate when no new path is found or more than V-1 loops
while (!same && iter < n - 1)
{
// execute semiring on d and A, and save the result to dtmp
LAGRAPH_OK (GrB_vxm(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3,
d, Atmp, GrB_NULL));
LAGRAPH_OK (LAGraph_Vector_isequal(&same, dtmp, d, BF_EQ_Tuple3));
if (!same)
{
GrB_Vector ttmp = dtmp;
dtmp = d;
d = ttmp;
}
iter ++;
}
// check for negative-weight cycle only when there was a new path in the
// last loop, otherwise, there can't be a negative-weight cycle.
if (!same)
{
// execute semiring again to check for negative-weight cycle
LAGRAPH_OK (GrB_vxm(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3,
d, Atmp, GrB_NULL));
// if d != dtmp, then there is a negative-weight cycle in the graph
LAGRAPH_OK (LAGraph_Vector_isequal(&same, dtmp, d, BF_EQ_Tuple3));
if (!same)
{
// printf("A negative-weight cycle found. \n");
LAGRAPH_FREE_ALL;
return (GrB_SUCCESS) ;
}
}
//--------------------------------------------------------------------------
// extract tuple from "distance" vector d and create GrB_Vectors for output
//--------------------------------------------------------------------------
LAGRAPH_OK (GrB_Vector_extractTuples_UDT (I, (void *) W, &n, d));
h = LAGraph_malloc (n, sizeof(GrB_Index)) ;
pi = LAGraph_malloc (n, sizeof(GrB_Index)) ;
if (w == NULL || h == NULL || pi == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
for (GrB_Index k = 0; k < n; k++)
{
w [k] = W[k].w ;
h [k] = W[k].h ;
pi[k] = W[k].pi;
}
LAGRAPH_OK (GrB_Vector_new(pd_output, GrB_FP64, n));
LAGRAPH_OK (GrB_Vector_new(ppi_output, GrB_UINT64, n));
LAGRAPH_OK (GrB_Vector_new(ph_output, GrB_UINT64, n));
LAGRAPH_OK (GrB_Vector_build_FP64 (*pd_output , I, w , n, GrB_MIN_FP64 ));
LAGRAPH_OK (GrB_Vector_build_UINT64(*ppi_output, I, pi, n, GrB_MIN_UINT64));
LAGRAPH_OK (GrB_Vector_build_UINT64(*ph_output , I, h , n, GrB_MIN_UINT64));
LAGRAPH_FREE_ALL;
return (GrB_SUCCESS) ;
}
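/* Editorial usage sketch, mirroring the contract described in the header
comment above (error handling elided; illustrative, not an official example):
GrB_Vector d = NULL, pi = NULL, h = NULL ;
LAGraph_BF_full (&d, &pi, &h, A, 0) ; // shortest paths from source vertex 0
if (d == NULL) { } // a negative-weight cycle is reachable from the source
*/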
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
* => Blocking algorithm following Goto's paper */
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
LhsMapper lhs(_lhs,lhsStride);
RhsMapper rhs(_rhs,rhsStride);
ResMapper res(_res, resStride);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
int tid = omp_get_thread_num();
int threads = omp_get_num_threads();
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
for(int shift=0; shift<threads; ++shift)
{
int i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
#pragma omp atomic
info[i].users -= 1;
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panels of the packed lhs panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
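// The sequential path above is the classic three-level cache blocking:
// mc x kc panels of A and kc x nc blocks of B are packed so the innermost
// kernel streams from cache. Below is a minimal, self-contained sketch of
// that loop nest for plain column-major arrays; the function name and the
// fixed block sizes are illustrative only, not Eigen's API.
inline void blocked_gemm_sketch(const double* A, const double* B, double* C,
                                long rows, long cols, long depth)
{
  const long mc = 64, nc = 64, kc = 64;    // cache block sizes (illustrative)
  for (long i2 = 0; i2 < rows; i2 += mc)
    for (long k2 = 0; k2 < depth; k2 += kc)
      for (long j2 = 0; j2 < cols; j2 += nc)
      {
        const long iend = i2 + mc < rows  ? i2 + mc : rows;
        const long kend = k2 + kc < depth ? k2 + kc : depth;
        const long jend = j2 + nc < cols  ? j2 + nc : cols;
        // C(i2:iend, j2:jend) += A(i2:iend, k2:kend) * B(k2:kend, j2:jend)
        for (long j = j2; j < jend; ++j)
          for (long k = k2; k < kend; ++k)
            for (long i = i2; i < iend; ++i)
              C[i + j * rows] += A[i + k * rows] * B[k + j * depth];
      }
}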
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
void initParallelSession(Index num_threads) const
{
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;
RhsScalar* m_blockB;
Index m_mc;
Index m_nc;
Index m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline Index mc() const { return m_mc; }
inline Index nc() const { return m_nc; }
inline Index kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
public:
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
#else
this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
}
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
: public level3_blocking<
typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA;
Index m_sizeB;
public:
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
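// A rough, self-contained sketch of the kind of heuristic behind
// computeProductBlockingSizes(): keep a kc-deep slice of B in L1, the packed
// mc x kc panel of A in L2, and the packed kc x nc panel of B in L3. The
// cache capacities and the fixed micro-kernel width nr below are assumptions
// for illustration, not the library's actual tuning.
inline void blocking_sizes_sketch(long& kc, long& mc, long& nc, long scalar_size)
{
  const long l1 = 32 * 1024, l2 = 256 * 1024, l3 = 8 * 1024 * 1024; // assumed caches
  const long nr = 8;                       // assumed micro-kernel width
  kc = l1 / (nr * scalar_size);            // kc x nr slice of B stays in L1
  mc = l2 / (kc * scalar_size);            // mc x kc panel of A fills L2
  nc = l3 / (kc * scalar_size);            // kc x nc panel of B fills L3
}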
} // end namespace internal
namespace internal {
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=404 for a discussion and helper program
// to determine the following heuristic.
// EIGEN_GEMM_TO_COEFFBASED_THRESHOLD is typically defined to 20 in GeneralProduct.h,
// unless it has been specialized by the user or for a given architecture.
// Note that the condition rhs.rows()>0 was required because lazy product is (was?) not happy with empty inputs.
// I'm not sure it is still required.
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<EIGEN_GEMM_TO_COEFFBASED_THRESHOLD && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
* RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
Scalar, Index,
internal::general_matrix_matrix_product<
Index,
LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
GB_binop__times_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int16)
// A*D function (colscale): GB (_AxD__times_int16)
// D*A function (rowscale): GB (_DxB__times_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__times_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__times_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int16)
// C=scalar+B GB (_bind1st__times_int16)
// C=scalar+B' GB (_bind1st_tran__times_int16)
// C=A+scalar GB (_bind2nd__times_int16)
// C=A'+scalar GB (_bind2nd_tran__times_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_INT16 || GxB_NO_TIMES_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__times_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__times_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__times_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__times_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__times_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
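// Sketch of the flip logic above, outside of GraphBLAS: a commutative op
// such as TIMES can ignore flipxy entirely, while a non-commutative op with
// no flipped variant (e.g. atan2) must swap its inputs. The helper names
// below are illustrative only, not part of the library.
static inline int16_t times_int16_sketch (int16_t x, int16_t y, bool flipxy)
{
    (void) flipxy ;     // z = (x*y) == (y*x), so the flip is a no-op
    return (x * y) ;
}
static inline double atan2_sketch (double x, double y, bool flipxy)
{
    // no flipped variant exists, so flipxy swaps the operands:
    return (flipxy ? atan2 (x, y) : atan2 (y, x)) ;
}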
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__times_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x * bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij * y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
GrB_Info GB (_bind1st_tran__times_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
GrB_Info GB (_bind2nd_tran__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
prox_lib.h | /*!
* Modifications Copyright 2017 H2O.ai, Inc.
*/
#ifndef PROX_LIB_H_
#define PROX_LIB_H_
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>
#include <vector>
#ifdef __CUDACC__
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#define __DEVICE__ __device__
#else
#define __DEVICE__
#endif
#include "interface_defs.h"
// List of functions supported by the proximal operator library.
enum Function { kAbs, // f(x) = |x|
kExp, // f(x) = e^x
kHuber, // f(x) = huber(x)
kIdentity, // f(x) = x
kIndBox01, // f(x) = I(0 <= x <= 1)
kIndEq0, // f(x) = I(x = 0)
kIndGe0, // f(x) = I(x >= 0)
kIndLe0, // f(x) = I(x <= 0)
kLogistic, // f(x) = log(1 + e^x)
kMaxNeg0, // f(x) = max(0, -x)
kMaxPos0, // f(x) = max(0, x)
kNegEntr, // f(x) = x log(x)
kNegLog, // f(x) = -log(x)
kRecipr, // f(x) = 1/x
kSquare, // f(x) = (1/2) x^2
kZero }; // f(x) = 0
// Object associated with the generic function c * f(a * x - b) + d * x + (1/2) e * x^2.
// Parameters a and c default to 1, while b, d and e default to 0.
template <typename T>
struct FunctionObj {
Function h;
T a, b, c, d, e;
FunctionObj(Function h, T a, T b, T c, T d, T e)
: h(h), a(a), b(b), c(c), d(d), e(e) { CheckConsts(); }
FunctionObj(Function h, T a, T b, T c, T d)
: h(h), a(a), b(b), c(c), d(d), e(0) { CheckConsts(); }
FunctionObj(Function h, T a, T b, T c)
: h(h), a(a), b(b), c(c), d(0), e(0) { CheckConsts(); }
FunctionObj(Function h, T a, T b)
: h(h), a(a), b(b), c(1), d(0), e(0) { }
FunctionObj(Function h, T a)
: h(h), a(a), b(0), c(1), d(0), e(0) { }
explicit FunctionObj(Function h)
: h(h), a(1), b(0), c(1), d(0), e(0) { }
FunctionObj()
: h(kZero), a(1), b(0), c(1), d(0), e(0) { }
void CheckConsts() {
if (c < static_cast<T>(0))
Printf("WARNING c < 0. Function not convex. Using c = 0");
if (e < static_cast<T>(0))
Printf("WARNING e < 0. Function not convex. Using e = 0");
c = std::max(c, static_cast<T>(0));
e = std::max(e, static_cast<T>(0));
}
};
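// Usage sketch (illustrative only, assuming this header is included): each
// object encodes c * f(a * x - b) + d * x + (1/2) e * x^2 for one of the
// Function enums above.
inline void function_obj_example_sketch() {
  FunctionObj<double> f1(kAbs, 1., 1., 2.);  // f1(x) = 2 * |x - 1|
  FunctionObj<double> f2(kSquare);           // f2(x) = (1/2) x^2
  (void) f1; (void) f2;
}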
// Local Functions.
namespace {
// Evaluate abs(x)
__DEVICE__ inline double Abs(double x) { return fabs(x); }
__DEVICE__ inline float Abs(float x) { return fabsf(x); }
// Evaluate acos(x)
__DEVICE__ inline double Acos(double x) { return acos(x); }
__DEVICE__ inline float Acos(float x) { return acosf(x); }
// Evaluate cos(x)
__DEVICE__ inline double Cos(double x) { return cos(x); }
__DEVICE__ inline float Cos(float x) { return cosf(x); }
// Evaluate e^x
__DEVICE__ inline double Exp(double x) { return exp(x); }
__DEVICE__ inline float Exp(float x) { return expf(x); }
// Evaluate log(x)
__DEVICE__ inline double Log(double x) { return log(x); }
__DEVICE__ inline float Log(float x) { return logf(x); }
// Evaluate max(x, y)
__DEVICE__ inline double Max(double x, double y) { return fmax(x, y); }
__DEVICE__ inline float Max(float x, float y) { return fmaxf(x, y); }
// Evaluate min(x, y)
__DEVICE__ inline double Min(double x, double y) { return fmin(x, y); }
__DEVICE__ inline float Min(float x, float y) { return fminf(x, y); }
// Evaluate x^y
__DEVICE__ inline double Pow(double x, double y) { return pow(x, y); }
__DEVICE__ inline float Pow(float x, float y) { return powf(x, y); }
// Evaluate sqrt(x)
__DEVICE__ inline double Sqrt(double x) { return sqrt(x); }
__DEVICE__ inline float Sqrt(float x) { return sqrtf(x); }
// Numeric Epsilon.
template <typename T>
__DEVICE__ inline T Epsilon();
template <>
__DEVICE__ inline double Epsilon<double>() { return 4e-16; }
template <>
__DEVICE__ inline float Epsilon<float>() { return 1e-7f; }
// Numeric tolerance.
template <typename T>
__DEVICE__ inline T Tol();
template <>
__DEVICE__ inline double Tol() { return 1e-10; }
template <>
__DEVICE__ inline float Tol() { return 1e-5f; }
// Evaluation of max(0, x).
template <typename T>
__DEVICE__ inline T MaxPos(T x) {
return Max(static_cast<T>(0), x);
}
// Evaluation of max(0, -x).
template <typename T>
__DEVICE__ inline T MaxNeg(T x) {
return Max(static_cast<T>(0), -x);
}
// Evaluation of sign(x)
template <typename T>
__DEVICE__ inline T Sign(T x) {
return x >= 0 ? 1 : -1;
}
// LambertW(Exp(x))
// Evaluate the principal branch of the Lambert W function.
// ref: http://keithbriggs.info/software/LambertW.c
template <typename T>
__DEVICE__ inline T LambertWExp(T x) {
T w;
if (x > static_cast<T>(100)) {
// Approximation for x in [100, 700].
T log_x = Log(x);
return static_cast<T>(-0.36962844)
+ x
- static_cast<T>(0.97284858) * log_x
+ static_cast<T>(1.3437973) / log_x;
} else if (x < static_cast<T>(0)) {
T p = Sqrt(static_cast<T>(2.0) * (Exp(x + static_cast<T>(1)) + static_cast<T>(1)));
w = static_cast<T>(-1.0)
+ p * (static_cast<T>(1.0)
+ p * (static_cast<T>(-1.0 / 3.0)
+ p * static_cast<T>(11.0 / 72.0)));
} else {
w = x;
}
if (x > static_cast<T>(1.098612288668110)) {
w -= Log(w);
}
for (unsigned int i = 0u; i < 10u; i++) {
T e = Exp(w);
T t = w * e - Exp(x);
T p = w + static_cast<T>(1.);
t /= e * p - static_cast<T>(0.5) * (p + static_cast<T>(1.0)) * t / p;
w -= t;
if (Abs(t) < Epsilon<T>() * (static_cast<T>(1) + Abs(w)))
break;
}
return w;
}
// Find the root of a cubic x^3 + px^2 + qx + r = 0 with a single positive root.
// ref: http://math.stackexchange.com/questions/60376
template <typename T>
__DEVICE__ inline T CubicSolve(T p, T q, T r) {
T s = p / 3, s2 = s * s, s3 = s2 * s;
T a = -s2 + q / 3;
T b = s3 - s * q / 2 + r / 2;
T a3 = a * a * a;
T b2 = b * b;
if (a3 + b2 >= 0) {
T A = Pow(Sqrt(a3 + b2) - b, static_cast<T>(1) / 3);
return -s - a / A + A;
} else {
T A = Sqrt(-a3);
T B = Acos(-b / A);
T C = Pow(A, static_cast<T>(1) / 3);
return -s + (C - a / C) * Cos(B / 3);
}
}
} // namespace
// Proximal operator definitions.
//
// Each of the following functions corresponds to one of the Function enums.
// All functions accept one argument v and five parameters (a, b, c, d, and e),
// plus the penalty parameter rho, and return the evaluation of
//
//   v -> Prox{c * f(a * x - b) + d * x + (1/2) e * x ^ 2}(v),
//
// where Prox{.} is the proximal operator with penalty parameter rho.
template <typename T>
__DEVICE__ inline T ProxAbs(T v, T rho) {
return MaxPos(v - 1 / rho) - MaxNeg(v + 1 / rho);
}
template <typename T>
__DEVICE__ inline T ProxNegEntr(T v, T rho) {
// Use double precision.
return static_cast<T>(
LambertWExp<double>(
static_cast<double>((rho * v - 1) + Log(rho)))) / rho;
}
template <typename T>
__DEVICE__ inline T ProxExp(T v, T rho) {
return v - static_cast<T>(
LambertWExp<double>(static_cast<double>(v - Log(rho))));
}
template <typename T>
__DEVICE__ inline T ProxHuber(T v, T rho) {
return Abs(v) < 1 + 1 / rho ? v * rho / (1 + rho) : v - Sign(v) / rho;
}
template <typename T>
__DEVICE__ inline T ProxIdentity(T v, T rho) {
return v - 1 / rho;
}
template <typename T>
__DEVICE__ inline T ProxIndBox01(T v, T rho) {
return v <= 0 ? 0 : v >= 1 ? 1 : v;
}
template <typename T>
__DEVICE__ inline T ProxIndEq0(T v, T rho) {
return 0;
}
template <typename T>
__DEVICE__ inline T ProxIndGe0(T v, T rho) {
return v <= 0 ? 0 : v;
}
template <typename T>
__DEVICE__ inline T ProxIndLe0(T v, T rho) {
return v >= 0 ? 0 : v;
}
template <typename T>
__DEVICE__ inline T ProxLogistic(T v, T rho) {
// Initial guess based on piecewise approximation.
T x;
if (v < static_cast<T>(-2.5))
x = v;
else if (v > static_cast<T>(2.5) + 1 / rho)
x = v - 1 / rho;
else
x = (rho * v - static_cast<T>(0.5)) / (static_cast<T>(0.2) + rho);
// Newton iteration.
T l = v - 1 / rho, u = v;
for (unsigned int i = 0; i < 5; ++i) {
T inv_ex = 1 / (1 + Exp(-x));
T f = inv_ex + rho * (x - v);
T g = inv_ex * (1 - inv_ex) + rho;
if (f < 0)
l = x;
else
u = x;
x = x - f / g;
x = Min(x, u);
x = Max(x, l);
}
// Guarded method if not converged.
for (unsigned int i = 0; u - l > Tol<T>() && i < 100; ++i) {
T g_rho = 1 / (rho * (1 + Exp(-x))) + (x - v);
if (g_rho > 0) {
l = Max(l, x - g_rho);
u = x;
} else {
u = Min(u, x - g_rho);
l = x;
}
x = (u + l) / 2;
}
return x;
}
template <typename T>
__DEVICE__ inline T ProxMaxNeg0(T v, T rho) {
T z = v >= 0 ? v : 0;
return v + 1 / rho <= 0 ? v + 1 / rho : z;
}
template <typename T>
__DEVICE__ inline T ProxMaxPos0(T v, T rho) {
T z = v <= 0 ? v : 0;
return v >= 1 / rho ? v - 1 / rho : z;
}
template <typename T>
__DEVICE__ inline T ProxNegLog(T v, T rho) {
return (v + Sqrt(v * v + 4 / rho)) / 2;
}
template <typename T>
__DEVICE__ inline T ProxRecipr(T v, T rho) {
v = Max(v, static_cast<T>(0));
return CubicSolve(-v, static_cast<T>(0), -1 / rho);
}
template <typename T>
__DEVICE__ inline T ProxSquare(T v, T rho) {
return rho * v / (1 + rho);
}
template <typename T>
__DEVICE__ inline T ProxZero(T v, T rho) {
return v;
}
#define SMALL 1E-30 // ok for float or double for this purpose
// Evaluates the proximal operator of f.
template <typename T>
__DEVICE__ inline T ProxEval(const FunctionObj<T> &f_obj, T v, T rho) {
const T a = f_obj.a, b = f_obj.b, c = f_obj.c, d = f_obj.d, e = f_obj.e;
v = a * (v * rho - d) / (SMALL + e + rho) - b;
rho = (e + rho) / (SMALL + c * a * a); // Assumes c >= 0, as in the original paper; SMALL keeps this finite when the weight c is 0.
switch (f_obj.h) {
case kAbs: v = ProxAbs(v, rho); break;
case kNegEntr: v = ProxNegEntr(v, rho); break;
case kExp: v = ProxExp(v, rho); break;
case kHuber: v = ProxHuber(v, rho); break;
case kIdentity: v = ProxIdentity(v, rho); break;
case kIndBox01: v = ProxIndBox01(v, rho); break;
case kIndEq0: v = ProxIndEq0(v, rho); break;
case kIndGe0: v = ProxIndGe0(v, rho); break;
case kIndLe0: v = ProxIndLe0(v, rho); break;
case kLogistic: v = ProxLogistic(v, rho); break;
case kMaxNeg0: v = ProxMaxNeg0(v, rho); break;
case kMaxPos0: v = ProxMaxPos0(v, rho); break;
case kNegLog: v = ProxNegLog(v, rho); break;
case kRecipr: v = ProxRecipr(v, rho); break;
case kSquare: v = ProxSquare(v, rho); break;
case kZero: default: v = ProxZero(v, rho); break;
}
return (v + b) / (SMALL + a); // TODO: assumes a >= 0, which is typical but not required by the paper.
}
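// Numerical sanity sketch (not part of the library): with the default kAbs
// object (a = c = 1, b = d = e = 0), ProxEval reduces to plain soft
// thresholding, prox_{|.|, rho}(v) = sign(v) * max(|v| - 1/rho, 0), up to
// the SMALL guard. The helper name is illustrative.
template <typename T>
__DEVICE__ inline T SoftThresholdSketch(T v, T rho) {
  return MaxPos(v - 1 / rho) - MaxNeg(v + 1 / rho);
}
// e.g. ProxEval(FunctionObj<double>(kAbs), 3.0, 2.0) ~= 2.5
//      == SoftThresholdSketch(3.0, 2.0).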
// Function definitions.
//
// Each of the following functions corresponds to one of the Function enums.
// All functions accept one argument x and five parameters (a, b, c, d, and e)
// and return the evaluation of
//
//   x -> c * f(a * x - b) + d * x + (1/2) e * x ^ 2.
template <typename T>
__DEVICE__ inline T FuncAbs(T x) {
return Abs(x);
}
template <typename T>
__DEVICE__ inline T FuncNegEntr(T x) {
return x <= 0 ? 0 : x * Log(x);
}
template <typename T>
__DEVICE__ inline T FuncExp(T x) {
return Exp(x);
}
template <typename T>
__DEVICE__ inline T FuncHuber(T x) {
T xabs = Abs(x);
T xabs2 = xabs * xabs;
return xabs < static_cast<T>(1) ? xabs2 / 2 : xabs - static_cast<T>(0.5);
}
template <typename T>
__DEVICE__ inline T FuncIdentity(T x) {
return x;
}
template <typename T>
__DEVICE__ inline T FuncIndBox01(T x) {
return 0;
}
template <typename T>
__DEVICE__ inline T FuncIndEq0(T x) {
return 0;
}
template <typename T>
__DEVICE__ inline T FuncIndGe0(T x) {
return 0;
}
template <typename T>
__DEVICE__ inline T FuncIndLe0(T x) {
return 0;
}
template <typename T>
__DEVICE__ inline T FuncLogistic(T x) {
return Log(1 + Exp(x));
}
template <typename T>
__DEVICE__ inline T FuncMaxNeg0(T x) {
return MaxNeg(x);
}
template <typename T>
__DEVICE__ inline T FuncMaxPos0(T x) {
return MaxPos(x);
}
template <typename T>
__DEVICE__ inline T FuncNegLog(T x) {
x = Max(static_cast<T>(0), x);
return -Log(x);
}
template <typename T>
__DEVICE__ inline T FuncRecpr(T x) {
x = Max(static_cast<T>(0), x);
return 1 / x;
}
template <typename T>
__DEVICE__ inline T FuncSquare(T x) {
return x * x / 2;
}
template <typename T>
__DEVICE__ inline T FuncZero(T x) {
return 0;
}
// Evaluates the function f.
template <typename T>
__DEVICE__ inline T FuncEval(const FunctionObj<T> &f_obj, T x) {
T dx = f_obj.d * x;
T ex = f_obj.e * x * x / 2;
x = f_obj.a * x - f_obj.b;
switch (f_obj.h) {
case kAbs: x = FuncAbs(x); break;
case kNegEntr: x = FuncNegEntr(x); break;
case kExp: x = FuncExp(x); break;
case kHuber: x = FuncHuber(x); break;
case kIdentity: x = FuncIdentity(x); break;
case kIndBox01: x = FuncIndBox01(x); break;
case kIndEq0: x = FuncIndEq0(x); break;
case kIndGe0: x = FuncIndGe0(x); break;
case kIndLe0: x = FuncIndLe0(x); break;
case kLogistic: x = FuncLogistic(x); break;
case kMaxNeg0: x = FuncMaxNeg0(x); break;
case kMaxPos0: x = FuncMaxPos0(x); break;
case kNegLog: x = FuncNegLog(x); break;
case kRecipr: x = FuncRecpr(x); break;
case kSquare: x = FuncSquare(x); break;
case kZero: default: x = FuncZero(x); break;
}
return f_obj.c * x + dx + ex;
}
// Projection onto subgradient definitions
//
// Each of the following functions corresponds to one of the Function enums.
// All functions accept a point v, an argument x, and five parameters (a, b, c,
// d, and e), and return the evaluation of
//
// x -> ProjSubgrad{c * f(a * x - b) + d * x + (1/2) e * x ^ 2},
//
// where ProjSubgrad{.} is the projection onto the subgradient of the function.
template <typename T>
__DEVICE__ inline T ProjSubgradAbs(T v, T x) {
if (x < static_cast<T>(0.))
return static_cast<T>(-1.);
else if (x > static_cast<T>(0.))
return static_cast<T>(1.);
else
return Max(static_cast<T>(-1.), Min(static_cast<T>(1.), v));
}
template <typename T>
__DEVICE__ inline T ProjSubgradNegEntr(T v, T x) {
return -Log(x) - static_cast<T>(1.);
}
template <typename T>
__DEVICE__ inline T ProjSubgradExp(T v, T x) {
return Exp(x);
}
template <typename T>
__DEVICE__ inline T ProjSubgradHuber(T v, T x) {
return Max(static_cast<T>(-1.), Min(static_cast<T>(1.), x));
}
template <typename T>
__DEVICE__ inline T ProjSubgradIdentity(T v, T x) {
return static_cast<T>(1.);
}
template <typename T>
__DEVICE__ inline T ProjSubgradIndBox01(T v, T x) {
if (x <= static_cast<T>(0.))
return Min(static_cast<T>(0.), v);
else if (x >= static_cast<T>(1.))
return Max(static_cast<T>(0.), v);
else
return static_cast<T>(0.);
}
template <typename T>
__DEVICE__ inline T ProjSubgradIndEq0(T v, T x) {
return v;
}
template <typename T>
__DEVICE__ inline T ProjSubgradIndGe0(T v, T x) {
if (x <= static_cast<T>(0.))
return Min(static_cast<T>(0.), v);
else
return static_cast<T>(0.);
}
template <typename T>
__DEVICE__ inline T ProjSubgradIndLe0(T v, T x) {
if (x >= static_cast<T>(0.))
return Max(static_cast<T>(0.), v);
else
return static_cast<T>(0.);
}
template <typename T>
__DEVICE__ inline T ProjSubgradLogistic(T v, T x) {
return Exp(x) / (static_cast<T>(1.) + Exp(x));
}
template <typename T>
__DEVICE__ inline T ProjSubgradMaxNeg0(T v, T x) {
if (x < static_cast<T>(0.))
return static_cast<T>(-1.);
else if (x > static_cast<T>(0.))
return static_cast<T>(0.);
else
return Min(static_cast<T>(0.), Max(static_cast<T>(-1.), v));
}
template <typename T>
__DEVICE__ inline T ProjSubgradMaxPos0(T v, T x) {
if (x < static_cast<T>(0.))
return static_cast<T>(0.);
else if (x > static_cast<T>(0.))
return static_cast<T>(1.);
else
return Min(static_cast<T>(1.), Max(static_cast<T>(0.), v));
}
template <typename T>
__DEVICE__ inline T ProjSubgradNegLog(T v, T x) {
return static_cast<T>(-1.) / x;
}
template <typename T>
__DEVICE__ inline T ProjSubgradRecipr(T v, T x) {
return static_cast<T>(1.) / (x * x);
}
template <typename T>
__DEVICE__ inline T ProjSubgradSquare(T v, T x) {
return x;
}
template <typename T>
__DEVICE__ inline T ProjSubgradZero(T v, T x) {
return static_cast<T>(0.);
}
// Evaluates the projection of v onto the subgradient of f at x.
template <typename T>
__DEVICE__ inline T ProjSubgradEval(const FunctionObj<T> &f_obj, T v, T x) {
const T a = f_obj.a, b = f_obj.b, c = f_obj.c, d = f_obj.d, e = f_obj.e;
if (a == static_cast<T>(0.) || c == static_cast<T>(0.))
return d + e * x;
v = static_cast<T>(1.) / (a * c) * (v - d - e * x);
T axb = a * x - b;
switch (f_obj.h) {
case kAbs: v = ProjSubgradAbs(v, axb); break;
case kNegEntr: v = ProjSubgradNegEntr(v, axb); break;
case kExp: v = ProjSubgradExp(v, axb); break;
case kHuber: v = ProjSubgradHuber(v, axb); break;
case kIdentity: v = ProjSubgradIdentity(v, axb); break;
case kIndBox01: v = ProjSubgradIndBox01(v, axb); break;
case kIndEq0: v = ProjSubgradIndEq0(v, axb); break;
case kIndGe0: v = ProjSubgradIndGe0(v, axb); break;
case kIndLe0: v = ProjSubgradIndLe0(v, axb); break;
case kLogistic: v = ProjSubgradLogistic(v, axb); break;
case kMaxNeg0: v = ProjSubgradMaxNeg0(v, axb); break;
case kMaxPos0: v = ProjSubgradMaxPos0(v, axb); break;
case kNegLog: v = ProjSubgradNegLog(v, axb); break;
case kRecipr: v = ProjSubgradRecipr(v, axb); break;
case kSquare: v = ProjSubgradSquare(v, axb); break;
case kZero: default: v = ProjSubgradZero(v, axb); break;
}
return a * c * v + d + e * x;
}
// Evaluates the proximal operator Prox{f_obj[i]}(x_in[i]) -> x_out[i].
//
// @param f_obj Vector of function objects.
// @param rho Penalty parameter.
// @param x_in Array to which proximal operator will be applied.
// @param x_out Array to which result will be written.
template <typename T>
void ProxEval(const std::vector<FunctionObj<T> > &f_obj, T rho, const T *x_in,
T *x_out) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (unsigned int i = 0; i < f_obj.size(); ++i)
x_out[i] = ProxEval(f_obj[i], x_in[i], rho);
}
// Returns the evaluation of Sum_i Func{f_obj[i]}(x_in[i]).
//
// @param f_obj Vector of function objects.
// @param x_in Array to which the functions will be applied.
// @returns Evaluation of the sum of functions.
template <typename T>
T FuncEval(const std::vector<FunctionObj<T> > &f_obj, const T* x_in) {
T sum = 0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for (unsigned int i = 0; i < f_obj.size(); ++i)
sum += FuncEval(f_obj[i], x_in[i]);
return sum;
}
// Projection onto the subgradient at x_in
// ProjSubgrad{f_obj[i]}(x_in[i], v_in[i]) -> x_out[i].
//
// @param f_obj Vector of function objects.
// @param x_in Array of points at which subgradient should be evaluated.
// @param v_in Array of points that should be projected onto the subgradient.
// @param v_out Array to which result will be written.
template <typename T>
void ProjSubgradEval(const std::vector<FunctionObj<T> > &f_obj, const T *x_in,
const T *v_in, T *v_out) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (unsigned int i = 0; i < f_obj.size(); ++i)
v_out[i] = ProjSubgradEval(f_obj[i], v_in[i], x_in[i]);
}
#ifdef __CUDACC__
template <typename T>
struct ProxEvalF : thrust::binary_function<FunctionObj<T>, T, T> {
T rho;
__device__ ProxEvalF(T rho) : rho(rho) { }
__device__ T operator()(const FunctionObj<T> &f_obj, T x) {
return ProxEval(f_obj, x, rho);
}
};
template <typename T>
void ProxEval(const thrust::device_vector<FunctionObj<T> > &f_obj, T rho,
const T *x_in, T *x_out) {
thrust::transform(thrust::device, f_obj.cbegin(), f_obj.cend(),
thrust::device_pointer_cast(x_in), thrust::device_pointer_cast(x_out),
ProxEvalF<T>(rho));
}
template <typename T>
struct FuncEvalF : thrust::binary_function<FunctionObj<T>, T, T> {
__device__ T operator()(const FunctionObj<T> &f_obj, T x) {
return FuncEval(f_obj, x);
}
};
template <typename T>
T FuncEval(const thrust::device_vector<FunctionObj<T> > &f_obj, const T *x_in) {
return thrust::inner_product(f_obj.cbegin(), f_obj.cend(),
thrust::device_pointer_cast(x_in), static_cast<T>(0), thrust::plus<T>(),
FuncEvalF<T>());
}
template <typename T>
struct ProjSubgradF {
__device__ T operator()(const FunctionObj<T> &f_obj,
const thrust::tuple<T, T>& vx) {
return ProjSubgradEval(f_obj, thrust::get<0>(vx), thrust::get<1>(vx));
}
};
template <typename T>
void ProjSubgradEval(const thrust::device_vector<FunctionObj<T> > &f_obj,
const T *v_in, const T *x_in, T *v_out) {
thrust::transform(thrust::device, f_obj.cbegin(), f_obj.cend(),
thrust::make_zip_iterator(thrust::make_tuple(
thrust::device_pointer_cast(v_in),
thrust::device_pointer_cast(x_in))),
thrust::device_pointer_cast(v_out), ProjSubgradF<T>());
}
#endif // __CUDACC__
#endif // PROX_LIB_H_
|
utils.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
#pragma once
#include <fcntl.h>
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <string>
#include <memory>
#include <random>
#include <set>
#ifndef __APPLE__
#include <malloc.h>
#endif
#ifdef _WINDOWS
#include <Windows.h>
typedef HANDLE FileHandle;
#else
#include <unistd.h>
typedef int FileHandle;
#endif
#include "logger.h"
#include "cached_io.h"
#include "common_includes.h"
#include "windows_customizations.h"
#ifdef EXEC_ENV_OLS
#include "content_buf.h"
#include "memory_mapped_files.h"
#endif
// taken from
// https://github.com/Microsoft/BLAS-on-flash/blob/master/include/utils.h
// round up X to the nearest multiple of Y
#define ROUND_UP(X, Y) \
((((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0)) * (Y))
#define DIV_ROUND_UP(X, Y) (((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0))
// round down X to the nearest multiple of Y
#define ROUND_DOWN(X, Y) (((uint64_t)(X) / (Y)) * (Y))
// alignment tests
#define IS_ALIGNED(X, Y) ((uint64_t)(X) % (uint64_t)(Y) == 0)
#define IS_512_ALIGNED(X) IS_ALIGNED(X, 512)
#define IS_4096_ALIGNED(X) IS_ALIGNED(X, 4096)
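// A few compile-time sanity checks (illustrative, not part of the original
// header) showing how the rounding/alignment macros above behave:
static_assert(ROUND_UP(10, 4) == 12, "10 rounds up to the next multiple of 4");
static_assert(DIV_ROUND_UP(10, 4) == 3, "ceil(10 / 4) == 3");
static_assert(ROUND_DOWN(10, 4) == 8, "10 rounds down to a multiple of 4");
static_assert(IS_512_ALIGNED(1024), "1024 is a multiple of 512");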
typedef uint64_t _u64;
typedef int64_t _s64;
typedef uint32_t _u32;
typedef int32_t _s32;
typedef uint16_t _u16;
typedef int16_t _s16;
typedef uint8_t _u8;
typedef int8_t _s8;
namespace diskann {
static const size_t MAX_SIZE_OF_STREAMBUF = 2LL * 1024 * 1024 * 1024;
enum Metric { L2 = 0, INNER_PRODUCT = 1, FAST_L2 = 2, PQ = 3 };
inline void alloc_aligned(void** ptr, size_t size, size_t align) {
*ptr = nullptr;
assert(IS_ALIGNED(size, align));
#ifndef _WINDOWS
*ptr = ::aligned_alloc(align, size);
#else
*ptr = ::_aligned_malloc(size, align); // note the swapped arguments!
#endif
assert(*ptr != nullptr);
}
inline void aligned_free(void* ptr) {
// TODO(Gopal): check whether the pointer was actually allocated by
// alloc_aligned.
if (ptr == nullptr) {
return;
}
#ifndef _WINDOWS
free(ptr);
#else
::_aligned_free(ptr);
#endif
}
inline void GenRandom(std::mt19937& rng, unsigned* addr, unsigned size,
unsigned N) {
for (unsigned i = 0; i < size; ++i) {
addr[i] = rng() % (N - size);
}
std::sort(addr, addr + size);
for (unsigned i = 1; i < size; ++i) {
if (addr[i] <= addr[i - 1]) {
addr[i] = addr[i - 1] + 1;
}
}
unsigned off = rng() % N;
for (unsigned i = 0; i < size; ++i) {
addr[i] = (addr[i] + off) % N;
}
}
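// Usage sketch (illustrative, not part of the original header): draw 5
// distinct ids out of [0, 100). GenRandom keeps the draws distinct by
// sorting, bumping duplicates to the next free slot, and then rotating
// everything by a random offset modulo N.
inline void gen_random_example_sketch() {
  std::mt19937 rng(42);
  unsigned ids[5];
  GenRandom(rng, ids, 5, 100);  // yields 5 distinct values in [0, 100)
}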
// get_bin_metadata functions START
inline void get_bin_metadata_impl(std::basic_istream<char>& reader,
size_t& nrows, size_t& ncols) {
int nrows_32, ncols_32;
reader.read((char*) &nrows_32, sizeof(int));
reader.read((char*) &ncols_32, sizeof(int));
nrows = nrows_32;
ncols = ncols_32;
}
#ifdef EXEC_ENV_OLS
inline void get_bin_metadata(MemoryMappedFiles& files,
const std::string& bin_file, size_t& nrows,
size_t& ncols) {
diskann::cout << "Getting metadata for file: " << bin_file << std::endl;
auto fc = files.getContent(bin_file);
auto cb = ContentBuf((char*) fc._content, fc._size);
std::basic_istream<char> reader(&cb);
get_bin_metadata_impl(reader, nrows, ncols);
}
#endif
inline void get_bin_metadata(const std::string& bin_file, size_t& nrows,
size_t& ncols) {
std::ifstream reader(bin_file.c_str(), std::ios::binary);
get_bin_metadata_impl(reader, nrows, ncols);
}
// get_bin_metadata functions END
template<typename T>
inline std::string getValues(T* data, size_t num) {
std::stringstream stream;
stream << "[";
for (size_t i = 0; i < num; i++) {
stream << std::to_string(data[i]) << ",";
}
stream << "]" << std::endl;
return stream.str();
}
// load_bin functions START
template<typename T>
inline void load_bin_impl(std::basic_istream<char>& reader,
size_t actual_file_size, T*& data, size_t& npts,
size_t& dim) {
int npts_i32, dim_i32;
reader.read((char*) &npts_i32, sizeof(int));
reader.read((char*) &dim_i32, sizeof(int));
npts = (unsigned) npts_i32;
dim = (unsigned) dim_i32;
diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
<< std::endl;
size_t expected_actual_file_size =
npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
if (actual_file_size != expected_actual_file_size) {
std::stringstream stream;
stream << "Error. File size mismatch. Actual size is " << actual_file_size
<< " while expected size is " << expected_actual_file_size
<< " npts = " << npts << " dim = " << dim
<< " size of <T>= " << sizeof(T) << std::endl;
diskann::cout << stream.str();
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
data = new T[npts * dim];
reader.read((char*) data, npts * dim * sizeof(T));
// diskann::cout << "Last bytes: "
// << getValues<T>(data + (npts - 2) * dim, dim);
// diskann::cout << "Finished reading bin file." << std::endl;
}
#ifdef EXEC_ENV_OLS
template<typename T>
inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file,
T*& data, size_t& npts, size_t& dim) {
diskann::cout << "Reading bin file " << bin_file.c_str() << " ..."
<< std::endl;
auto fc = files.getContent(bin_file);
uint32_t t_npts, t_dim;
uint32_t* contentAsIntPtr = (uint32_t*) (fc._content);
t_npts = *(contentAsIntPtr);
t_dim = *(contentAsIntPtr + 1);
npts = t_npts;
dim = t_dim;
auto actual_file_size = npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
if (actual_file_size != fc._size) {
std::stringstream stream;
stream << "Error. File size mismatch. Actual size is " << fc._size
<< " while expected size is " << actual_file_size
<< " npts = " << npts << " dim = " << dim
<< " size of <T>= " << sizeof(T) << std::endl;
diskann::cout << stream.str();
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
data =
(T*) ((char*) fc._content + 2 * sizeof(uint32_t)); // No need to copy!
}
#endif
inline void wait_for_keystroke() {
int a;
std::cout << "Press any number to continue.." << std::endl;
std::cin >> a;
}
template<typename T>
inline void load_bin(const std::string& bin_file, T*& data, size_t& npts,
size_t& dim) {
// OLS
//_u64 read_blk_size = 64 * 1024 * 1024;
// cached_ifstream reader(bin_file, read_blk_size);
// size_t actual_file_size = reader.get_file_size();
// END OLS
diskann::cout << "Reading bin file " << bin_file.c_str() << " ..."
<< std::endl;
std::ifstream reader(bin_file, std::ios::binary | std::ios::ate);
uint64_t fsize = reader.tellg();
reader.seekg(0);
load_bin_impl<T>(reader, fsize, data, npts, dim);
}
// load_bin functions END
inline void load_truthset(const std::string& bin_file, uint32_t*& ids,
float*& dists, size_t& npts, size_t& dim) {
_u64 read_blk_size = 64 * 1024 * 1024;
cached_ifstream reader(bin_file, read_blk_size);
diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..."
<< std::endl;
size_t actual_file_size = reader.get_file_size();
int npts_i32, dim_i32;
reader.read((char*) &npts_i32, sizeof(int));
reader.read((char*) &dim_i32, sizeof(int));
npts = (unsigned) npts_i32;
dim = (unsigned) dim_i32;
diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
<< std::endl;
int truthset_type = -1; // 1 means truthset has ids and distances, 2 means
// only ids, -1 is error
size_t expected_file_size_with_dists =
2 * npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
if (actual_file_size == expected_file_size_with_dists)
truthset_type = 1;
size_t expected_file_size_just_ids =
npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
if (actual_file_size == expected_file_size_just_ids)
truthset_type = 2;
if (truthset_type == -1) {
std::stringstream stream;
stream << "Error. File size mismatch. File should have bin format, with "
"npts followed by ngt followed by npts*ngt ids and optionally "
"followed by npts*ngt distance values; actual size: "
<< actual_file_size
<< ", expected: " << expected_file_size_with_dists << " or "
<< expected_file_size_just_ids;
diskann::cout << stream.str();
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
ids = new uint32_t[npts * dim];
reader.read((char*) ids, npts * dim * sizeof(uint32_t));
if (truthset_type == 1) {
dists = new float[npts * dim];
reader.read((char*) dists, npts * dim * sizeof(float));
}
}
inline void prune_truthset_for_range(const std::string& bin_file, float range, std::vector<std::vector<_u32>> &groundtruth,
size_t& npts) {
_u64 read_blk_size = 64 * 1024 * 1024;
cached_ifstream reader(bin_file, read_blk_size);
diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..."
<< std::endl;
size_t actual_file_size = reader.get_file_size();
int npts_i32, dim_i32;
reader.read((char*) &npts_i32, sizeof(int));
reader.read((char*) &dim_i32, sizeof(int));
npts = (unsigned) npts_i32;
_u64 dim = (unsigned) dim_i32;
_u32* ids;
float* dists;
diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..."
<< std::endl;
int truthset_type = -1; // 1 means truthset has ids and distances, 2 means
// only ids, -1 is error
size_t expected_file_size_with_dists =
2 * npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t);
if (actual_file_size == expected_file_size_with_dists)
truthset_type = 1;
if (truthset_type == -1) {
std::stringstream stream;
stream << "Error. File size mismatch. File should have bin format, with "
"npts followed by ngt followed by npts*ngt ids and optionally "
"followed by npts*ngt distance values; actual size: "
<< actual_file_size
<< ", expected: " << expected_file_size_with_dists;
diskann::cout << stream.str();
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
ids = new uint32_t[npts * dim];
reader.read((char*) ids, npts * dim * sizeof(uint32_t));
if (truthset_type == 1) {
dists = new float[npts * dim];
reader.read((char*) dists, npts * dim * sizeof(float));
}
float min_dist = std::numeric_limits<float>::max();
float max_dist = 0;
groundtruth.resize(npts);
for (_u32 i = 0; i < npts; i++) {
groundtruth[i].clear();
for (_u32 j = 0; j < dim; j++) {
if (dists[i*dim + j] <= range) {
groundtruth[i].emplace_back(ids[i*dim+j]);
}
min_dist = min_dist > dists[i*dim+j] ? dists[i*dim + j] : min_dist;
max_dist = max_dist < dists[i*dim+j] ? dists[i*dim + j] : max_dist;
}
//std::cout<<groundtruth[i].size() << " " ;
}
std::cout<<"Min dist: " << min_dist <<", Max dist: "<< max_dist << std::endl;
delete[] ids;
delete[] dists;
}
inline void load_range_truthset(const std::string& bin_file, std::vector<std::vector<_u32>> &groundtruth, _u64 & gt_num) {
_u64 read_blk_size = 64 * 1024 * 1024;
cached_ifstream reader(bin_file, read_blk_size);
diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..."
<< std::endl;
size_t actual_file_size = reader.get_file_size();
int npts_u32, total_u32;
reader.read((char*) &npts_u32, sizeof(int));
reader.read((char*) &total_u32, sizeof(int));
gt_num = (_u64) npts_u32;
_u64 total_res = (_u64) total_u32;
diskann::cout << "Metadata: #pts = " << gt_num << ", #total_results = " << total_res << "..."
<< std::endl;
size_t expected_file_size =
2*sizeof(_u32) + gt_num*sizeof(_u32) + total_res*sizeof(_u32);
if (actual_file_size != expected_file_size) {
std::stringstream stream;
stream << "Error. File size mismatch in range truthset. actual size: "
<< actual_file_size
<< ", expected: " << expected_file_size;
diskann::cout << stream.str();
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
groundtruth.clear();
groundtruth.resize(gt_num);
std::vector<_u32> gt_count(gt_num);
reader.read((char*) gt_count.data(), sizeof(_u32)*gt_num);
std::vector<_u32> gt_stats(gt_count);
std::sort(gt_stats.begin(), gt_stats.end());
std::cout<<"GT count percentiles:" << std::endl;
for (_u32 p = 0; p < 100; p += 5)
std::cout << "percentile " << p << ": "
<< gt_stats[std::floor((p / 100.0) * gt_num)] << std::endl;
std::cout << "percentile 100"
<< ": " << gt_stats[gt_num - 1] << std::endl;
for (_u32 i = 0; i < gt_num; i++) {
groundtruth[i].clear();
groundtruth[i].resize(gt_count[i]);
if (gt_count[i]!=0)
reader.read((char*) groundtruth[i].data(), sizeof(_u32)*gt_count[i]);
// debugging code
/* if (i < 10) {
std::cout<<gt_count[i] <<" nbrs, ids: ";
for (auto &x : groundtruth[i])
std::cout<<x <<" ";
std::cout<<std::endl;
} */
}
}
#ifdef EXEC_ENV_OLS
template<typename T>
inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file,
std::unique_ptr<T[]>& data, size_t& npts, size_t& dim) {
T* ptr;
load_bin<T>(files, bin_file, ptr, npts, dim);
data.reset(ptr);
}
#endif
template<typename T>
inline void load_bin(const std::string& bin_file, std::unique_ptr<T[]>& data,
size_t& npts, size_t& dim) {
T* ptr;
load_bin<T>(bin_file, ptr, npts, dim);
data.reset(ptr);
}
template<typename T>
inline void save_bin(const std::string& filename, T* data, size_t npts,
size_t ndims) {
std::ofstream writer(filename, std::ios::binary | std::ios::out);
diskann::cout << "Writing bin: " << filename.c_str() << std::endl;
int npts_i32 = (int) npts, ndims_i32 = (int) ndims;
writer.write((char*) &npts_i32, sizeof(int));
writer.write((char*) &ndims_i32, sizeof(int));
diskann::cout << "bin: #pts = " << npts << ", #dims = " << ndims
<< ", size = " << npts * ndims * sizeof(T) + 2 * sizeof(int)
<< "B" << std::endl;
// data = new T[npts_u64 * ndims_u64];
writer.write((char*) data, npts * ndims * sizeof(T));
writer.close();
diskann::cout << "Finished writing bin." << std::endl;
}
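// On-disk bin layout written above: [int32 npts][int32 ndims][T data[npts * ndims]]
// with points stored row-major; the readers in this file assume the same layout.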
// load_aligned_bin functions START
template<typename T>
inline void load_aligned_bin_impl(std::basic_istream<char>& reader,
size_t actual_file_size, T*& data,
size_t& npts, size_t& dim,
size_t& rounded_dim) {
int npts_i32, dim_i32;
reader.read((char*) &npts_i32, sizeof(int));
reader.read((char*) &dim_i32, sizeof(int));
npts = (unsigned) npts_i32;
dim = (unsigned) dim_i32;
size_t expected_actual_file_size =
npts * dim * sizeof(T) + 2 * sizeof(uint32_t);
if (actual_file_size != expected_actual_file_size) {
std::stringstream stream;
stream << "Error. File size mismatch. Actual size is " << actual_file_size
<< " while expected size is " << expected_actual_file_size
<< " npts = " << npts << " dim = " << dim
<< " size of <T>= " << sizeof(T) << std::endl;
diskann::cout << stream.str() << std::endl;
throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__,
__LINE__);
}
rounded_dim = ROUND_UP(dim, 8);
diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim
<< ", aligned_dim = " << rounded_dim << "..." << std::flush;
size_t allocSize = npts * rounded_dim * sizeof(T);
diskann::cout << "allocating aligned memory, " << allocSize << " bytes..."
<< std::flush;
alloc_aligned(((void**) &data), allocSize, 8 * sizeof(T));
diskann::cout << "done. Copying data..." << std::flush;
for (size_t i = 0; i < npts; i++) {
reader.read((char*) (data + i * rounded_dim), dim * sizeof(T));
memset(data + i * rounded_dim + dim, 0, (rounded_dim - dim) * sizeof(T));
}
diskann::cout << " done." << std::endl;
}
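// Example: with T = float and dim = 100, rounded_dim = ROUND_UP(100, 8) = 104,
// so each point occupies 104 floats and the trailing 4 are zero-filled above.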
#ifdef EXEC_ENV_OLS
template<typename T>
inline void load_aligned_bin(MemoryMappedFiles& files,
const std::string& bin_file, T*& data,
size_t& npts, size_t& dim, size_t& rounded_dim) {
diskann::cout << "Reading bin file " << bin_file << " ..." << std::flush;
FileContent fc = files.getContent(bin_file);
ContentBuf buf((char*) fc._content, fc._size);
std::basic_istream<char> reader(&buf);
size_t actual_file_size = fc._size;
load_aligned_bin_impl(reader, actual_file_size, data, npts, dim,
rounded_dim);
}
#endif
template<typename T>
inline void load_aligned_bin(const std::string& bin_file, T*& data,
size_t& npts, size_t& dim, size_t& rounded_dim) {
diskann::cout << "Reading bin file " << bin_file << " ..." << std::flush;
// START OLS
//_u64 read_blk_size = 64 * 1024 * 1024;
// cached_ifstream reader(bin_file, read_blk_size);
// size_t actual_file_size = reader.get_file_size();
// END OLS
std::ifstream reader(bin_file, std::ios::binary | std::ios::ate);
uint64_t fsize = reader.tellg();
reader.seekg(0);
load_aligned_bin_impl(reader, fsize, data, npts, dim, rounded_dim);
}
template<typename InType, typename OutType>
void convert_types(const InType* srcmat, OutType* destmat, size_t npts,
size_t dim) {
#pragma omp parallel for schedule(static, 65536)
for (int64_t i = 0; i < (_s64) npts; i++) {
for (uint64_t j = 0; j < dim; j++) {
destmat[i * dim + j] = (OutType) srcmat[i * dim + j];
}
}
}
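// Usage sketch (hypothetical buffers) -- e.g. narrowing a float matrix to
// uint8 after scaling it elsewhere:
//   std::vector<float>   src(npts * dim);
//   std::vector<uint8_t> dst(npts * dim);
//   diskann::convert_types<float, uint8_t>(src.data(), dst.data(), npts, dim);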
// this function will take in_file of n*d dimensions and save the output as a
// floating point matrix with n*(d+1) dimensions. All vectors are scaled by a
// large value M so that the norms are <= 1, and the final coordinate is set so
// that the resulting norm (in d+1 coordinates) is equal to 1. This is a
// classical transformation from MIPS to L2 search, from "On Symmetric and
// Asymmetric LSHs for Inner Product Search" by Neyshabur and Srebro.
template<typename T>
float prepare_base_for_inner_products(const std::string in_file,
const std::string out_file) {
std::cout << "Pre-processing base file by adding extra coordinate"
<< std::endl;
std::ifstream in_reader(in_file.c_str(), std::ios::binary);
std::ofstream out_writer(out_file.c_str(), std::ios::binary);
_u64 npts, in_dims, out_dims;
float max_norm = 0;
_u32 npts32, dims32;
in_reader.read((char*) &npts32, sizeof(uint32_t));
in_reader.read((char*) &dims32, sizeof(uint32_t));
npts = npts32;
in_dims = dims32;
out_dims = in_dims + 1;
_u32 outdims32 = (_u32) out_dims;
out_writer.write((char*) &npts32, sizeof(uint32_t));
out_writer.write((char*) &outdims32, sizeof(uint32_t));
size_t BLOCK_SIZE = 100000;
size_t block_size = npts <= BLOCK_SIZE ? npts : BLOCK_SIZE;
std::unique_ptr<T[]> in_block_data =
std::make_unique<T[]>(block_size * in_dims);
std::unique_ptr<float[]> out_block_data =
std::make_unique<float[]>(block_size * out_dims);
std::memset(out_block_data.get(), 0, sizeof(float) * block_size * out_dims);
_u64 num_blocks = DIV_ROUND_UP(npts, block_size);
std::vector<float> norms(npts, 0);
for (_u64 b = 0; b < num_blocks; b++) {
_u64 start_id = b * block_size;
_u64 end_id = (b + 1) * block_size < npts ? (b + 1) * block_size : npts;
_u64 block_pts = end_id - start_id;
in_reader.read((char*) in_block_data.get(),
block_pts * in_dims * sizeof(T));
for (_u64 p = 0; p < block_pts; p++) {
for (_u64 j = 0; j < in_dims; j++) {
norms[start_id + p] +=
in_block_data[p * in_dims + j] * in_block_data[p * in_dims + j];
}
max_norm =
max_norm > norms[start_id + p] ? max_norm : norms[start_id + p];
}
}
max_norm = std::sqrt(max_norm);
in_reader.seekg(2 * sizeof(_u32), std::ios::beg);
for (_u64 b = 0; b < num_blocks; b++) {
_u64 start_id = b * block_size;
_u64 end_id = (b + 1) * block_size < npts ? (b + 1) * block_size : npts;
_u64 block_pts = end_id - start_id;
in_reader.read((char*) in_block_data.get(),
block_pts * in_dims * sizeof(T));
for (_u64 p = 0; p < block_pts; p++) {
for (_u64 j = 0; j < in_dims; j++) {
out_block_data[p * out_dims + j] =
in_block_data[p * in_dims + j] / max_norm;
}
float res = 1 - (norms[start_id + p] / (max_norm * max_norm));
res = res <= 0 ? 0 : std::sqrt(res);
out_block_data[p * out_dims + out_dims - 1] = res;
}
out_writer.write((char*) out_block_data.get(),
block_pts * out_dims * sizeof(float));
}
out_writer.close();
return max_norm;
}
// plain saves data as npts X ndims array into filename
template<typename T>
void save_Tvecs(const char* filename, T* data, size_t npts, size_t ndims) {
std::string fname(filename);
// create cached ofstream with 64MB cache
cached_ofstream writer(fname, 64 * 1048576);
unsigned dims_u32 = (unsigned) ndims;
// start writing
for (uint64_t i = 0; i < npts; i++) {
// write dims in u32
writer.write((char*) &dims_u32, sizeof(unsigned));
// get cur point in data
T* cur_pt = data + i * ndims;
writer.write((char*) cur_pt, ndims * sizeof(T));
}
}
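// Each record above is [u32 ndims][T point[ndims]], i.e. the classic
// fvecs/ivecs-style layout -- hence the name save_Tvecs.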
// NOTE :: good efficiency when total_vec_size is integral multiple of 64
inline void prefetch_vector(const char* vec, size_t vecsize) {
size_t max_prefetch_size = (vecsize / 64) * 64;
for (size_t d = 0; d < max_prefetch_size; d += 64)
_mm_prefetch((const char*) vec + d, _MM_HINT_T0);
}
// NOTE :: good efficiency when total_vec_size is integral multiple of 64
inline void prefetch_vector_l2(const char* vec, size_t vecsize) {
size_t max_prefetch_size = (vecsize / 64) * 64;
for (size_t d = 0; d < max_prefetch_size; d += 64)
_mm_prefetch((const char*) vec + d, _MM_HINT_T1);
}
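// Usage sketch (hypothetical names): warm the next candidate's vector before
// the distance computation touches it, e.g.
//   prefetch_vector((const char*) (data + id * aligned_dim),
//                   aligned_dim * sizeof(float));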
}; // namespace diskann
struct PivotContainer {
PivotContainer() = default;
PivotContainer(size_t pivo_id, float pivo_dist)
: piv_id{pivo_id}, piv_dist{pivo_dist} {
}
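// note: the comparisons below are reversed, which turns
// std::priority_queue<PivotContainer> into a min-heap on piv_dist
// (smallest distance popped first)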
bool operator<(const PivotContainer& p) const {
return p.piv_dist < piv_dist;
}
bool operator>(const PivotContainer& p) const {
return p.piv_dist > piv_dist;
}
size_t piv_id;
float piv_dist;
};
inline bool file_exists(const std::string& name) {
struct stat buffer;
auto val = stat(name.c_str(), &buffer);
diskann::cout << " Stat(" << name.c_str() << ") returned: " << val
<< std::endl;
return (val == 0);
}
inline _u64 get_file_size(const std::string& fname) {
std::ifstream reader(fname, std::ios::binary | std::ios::ate);
if (!reader.fail() && reader.is_open()) {
_u64 end_pos = reader.tellg();
diskann::cout << " Tellg: " << reader.tellg() << " as u64: " << end_pos
<< std::endl;
reader.close();
return end_pos;
} else {
diskann::cout << "Could not open file: " << fname << std::endl;
return 0;
}
}
inline bool validate_file_size(const std::string& name) {
std::ifstream in(std::string(name), std::ios::binary);
in.seekg(0, in.end);
size_t actual_file_size = in.tellg();
in.seekg(0, in.beg);
size_t expected_file_size;
in.read((char*) &expected_file_size, sizeof(uint64_t));
if (actual_file_size != expected_file_size) {
diskann::cout << "Error loading" << name
<< ". Expected "
"size (metadata): "
<< expected_file_size
<< ", actual file size : " << actual_file_size
<< ". Exitting." << std::endl;
in.close();
return false;
}
in.close();
return true;
}
#ifdef _WINDOWS
#include <intrin.h>
#include <Psapi.h>
inline void printProcessMemory(const char* message) {
PROCESS_MEMORY_COUNTERS counters;
HANDLE h = GetCurrentProcess();
GetProcessMemoryInfo(h, &counters, sizeof(counters));
diskann::cout << message << " [Peak working set size: "
<< counters.PeakWorkingSetSize * 1.0 / (1024 * 1024 * 1024)
<< "GB Working set size: "
<< counters.WorkingSetSize * 1.0 / (1024 * 1024 * 1024)
<< "GB Private bytes "
<< counters.PagefileUsage * 1.0 / (1024 * 1024 * 1024) << "GB]"
<< std::endl;
}
#else
// need to check and change this
inline bool avx2Supported() {
return true;
}
inline void printProcessMemory(const char* message) {
diskann::cout << message << std::endl;
}
#endif
extern bool AvxSupportedCPU;
extern bool Avx2SupportedCPU;
|
neuron.h | /*
* Computer System Architectures (AVS 2019)
* Project no. 1 (ANN)
* Login: xstupi00
*/
/**
* @brief Returns the output of the neuron as the weighted sum of its inputs plus bias
* @param inputSize - number of inputs of the neuron
* @param input - pointer to neuron input array (identical for all neurons in the layer)
* @param weights - pointer to weights for all neurons in the layer
* @param bias - bias value of the neuron
* @return Output of the neuron
*/
// #pragma omp declare simd uniform(inputSize, input) linear(weight:512) simdlen(8) notinbranch
// #pragma omp declare simd uniform(inputSize, input) linear(weight:784) simdlen(8) notinbranch
// #pragma omp declare simd uniform(inputSize, input) linear(weight) simdlen(8) notinbranch
float evalNeuron(
size_t inputSize,
const float* input,
const float* weight,
float bias
);
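// A minimal reference implementation sketch (assumption: the real definition
// lives in the accompanying .cpp and may be vectorized via the pragmas above):
//
//   float evalNeuron(size_t inputSize, const float* input,
//                    const float* weight, float bias) {
//     float sum = bias;
//     for (size_t i = 0; i < inputSize; i++)
//       sum += input[i] * weight[i];
//     return sum;
//   }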
|
GB_unaryop__minv_int64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int64_uint64
// op(A') function: GB_tran__minv_int64_uint64
// C type: int64_t
// A type: uint64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_int64_uint64
(
int64_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_int64_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__le_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__le_int16
// A.*B function (eWiseMult): GB_AemultB__le_int16
// A*D function (colscale): GB_AxD__le_int16
// D*A function (rowscale): GB_DxB__le_int16
// C+=B function (dense accum): GB_Cdense_accumB__le_int16
// C+=b function (dense accum): GB_Cdense_accumb__le_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__le_int16
// C=scalar+B GB_bind1st__le_int16
// C=scalar+B' GB_bind1st_tran__le_int16
// C=A+scalar GB_bind2nd__le_int16
// C=A'+scalar GB_bind2nd_tran__le_int16
// C type: bool
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x <= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT16 || GxB_NO_LE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__le_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__le_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__le_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__le_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__le_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__le_int16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__le_int16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__le_int16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t bij = Bx [p] ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__le_int16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB_bind1st_tran__le_int16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB_bind2nd_tran__le_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ep.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - EP
This benchmark is an OpenMP C version of the NPB EP code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: P. O. Frederickson
D. H. Bailey
A. C. Woo
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npb-C.h"
#include "npbparams.h"
/* parameters */
#define MK 16
#define MM (M - MK)
#define NN (1 << MM)
#define NK (1 << MK)
#define NQ 10
#define EPSILON 1.0e-8
#define A 1220703125.0
#define S 271828183.0
#define TIMERS_ENABLED FALSE
/* global variables */
/* common /storage/ */
static double x[2*NK];
#pragma omp threadprivate(x)
static double q[NQ];
/*--------------------------------------------------------------------
program EMBAR
c-------------------------------------------------------------------*/
/*
c This is the serial version of the APP Benchmark 1,
c the "embarassingly parallel" benchmark.
c
c M is the Log_2 of the number of complex pairs of uniform (0, 1) random
c numbers. MK is the Log_2 of the size of each batch of uniform random
c numbers. MK can be set for convenience on a given system, since it does
c not affect the results.
*/
int main(int argc, char **argv) {
double Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc;
double dum[3] = { 1.0, 1.0, 1.0 };
int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode,
no_large_nodes, np_add, k_offset, j;
int nthreads = 1;
boolean verified;
char size[13+1]; /* character*13 */
/*
c Because the size of the problem is too large to store in a 32-bit
c integer for some classes, we put it into a string (for printing).
c Have to strip off the decimal point put in there by the floating
c point print statement (internal file)
*/
#ifndef POSIX
#ifndef NOBOMP
bomp_custom_init();
#endif
#endif
omp_set_num_threads(1);
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
" - EP Benchmark\n");
sprintf(size, "%12.0f", pow(2.0, M+1));
for (j = 13; j >= 1; j--) {
if (size[j] == '.') size[j] = ' ';
}
printf(" Number of random numbers generated: %13s\n", size);
verified = FALSE;
/*
c Compute the number of "batches" of random number pairs generated
c per processor. Adjust if the number of processors does not evenly
c divide the total number
*/
np = NN;
/*
c Call the random number generator functions and initialize
c the x-array to reduce the effects of paging on the timings.
c Also, call all mathematical functions that are used. Make
c sure these initializations cannot be eliminated as dead code.
*/
vranlc(0, &(dum[0]), dum[1], &(dum[2]));
dum[0] = randlc(&(dum[1]), dum[2]);
for (i = 0; i < 2*NK; i++)
{
x[i] = -1.0e99;
}
printf("Reached here ");
Mops = log(sqrt(fabs(max(1.0, 1.0))));
timer_clear(1);
timer_clear(2);
timer_clear(3);
timer_start(1);
vranlc(0, &t1, A, x);
/* Compute AN = A ^ (2 * NK) (mod 2^46). */
t1 = A;
for ( i = 1; i <= MK+1; i++) {
t2 = randlc(&t1, t1);
}
an = t1;
tt = S;
gc = 0.0;
sx = 0.0;
sy = 0.0;
for ( i = 0; i <= NQ - 1; i++) {
q[i] = 0.0;
}
/*
c Each instance of this loop may be performed independently. We compute
c the k offsets separately to take into account the fact that some nodes
c have more numbers to generate than others
*/
k_offset = -1;
#pragma omp parallel copyin(x)
{
double t1, t2, t3, t4, x1, x2;
int kk, i, ik, l;
double qq[NQ]; /* private copy of q[0:NQ-1] */
for (i = 0; i < NQ; i++) qq[i] = 0.0;
#pragma omp for reduction(+:sx,sy) schedule(static)
for (k = 1; k <= np; k++) {
kk = k_offset + k;
t1 = S;
t2 = an;
/* Find starting seed t1 for this kk. */
for (i = 1; i <= 100; i++) {
ik = kk / 2;
if (2 * ik != kk) t3 = randlc(&t1, t2);
if (ik == 0) break;
t3 = randlc(&t2, t2);
kk = ik;
}
/* Compute uniform pseudorandom numbers. */
if (TIMERS_ENABLED == TRUE) timer_start(3);
vranlc(2*NK, &t1, A, x-1);
if (TIMERS_ENABLED == TRUE) timer_stop(3);
/*
c Compute Gaussian deviates by acceptance-rejection method and
c tally counts in concentric square annuli. This loop is not
c vectorizable.
*/
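/*
c The rejection step below is the Marsaglia polar method: accept (x1, x2)
c drawn from [-1,1]^2 only when t1 = x1^2 + x2^2 <= 1; then
c t3 = x1 * sqrt(-2 log(t1) / t1) and t4 = x2 * sqrt(-2 log(t1) / t1)
c are independent standard Gaussian deviates.
*/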
if (TIMERS_ENABLED == TRUE) timer_start(2);
for ( i = 0; i < NK; i++) {
x1 = 2.0 * x[2*i] - 1.0;
x2 = 2.0 * x[2*i+1] - 1.0;
t1 = pow2(x1) + pow2(x2);
if (t1 <= 1.0) {
t2 = sqrt(-2.0 * log(t1) / t1);
t3 = (x1 * t2); /* Xi */
t4 = (x2 * t2); /* Yi */
l = max(fabs(t3), fabs(t4));
qq[l] += 1.0; /* counts */
sx = sx + t3; /* sum of Xi */
sy = sy + t4; /* sum of Yi */
}
}
if (TIMERS_ENABLED == TRUE) timer_stop(2);
}
#pragma omp critical
{
for (i = 0; i <= NQ - 1; i++) q[i] += qq[i];
}
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end of parallel region */
for (i = 0; i <= NQ-1; i++) {
gc = gc + q[i];
}
timer_stop(1);
tm = timer_read(1);
nit = 0;
if (M == 24) {
if((fabs((sx- (-3.247834652034740e3))/sx) <= EPSILON) &&
(fabs((sy- (-6.958407078382297e3))/sy) <= EPSILON)) {
verified = TRUE;
}
} else if (M == 25) {
if ((fabs((sx- (-2.863319731645753e3))/sx) <= EPSILON) &&
(fabs((sy- (-6.320053679109499e3))/sy) <= EPSILON)) {
verified = TRUE;
}
} else if (M == 28) {
if ((fabs((sx- (-4.295875165629892e3))/sx) <= EPSILON) &&
(fabs((sy- (-1.580732573678431e4))/sy) <= EPSILON)) {
verified = TRUE;
}
} else if (M == 30) {
if ((fabs((sx- (4.033815542441498e4))/sx) <= EPSILON) &&
(fabs((sy- (-2.660669192809235e4))/sy) <= EPSILON)) {
verified = TRUE;
}
} else if (M == 32) {
if ((fabs((sx- (4.764367927995374e4))/sx) <= EPSILON) &&
(fabs((sy- (-8.084072988043731e4))/sy) <= EPSILON)) {
verified = TRUE;
}
}
Mops = pow(2.0, M+1)/tm/1000000.0;
printf("EP Benchmark Results: \n"
"CPU Time = %10.4f\n"
"N = 2^%5d\n"
"No. Gaussian Pairs = %15.0f\n"
"Sums = %25.15e %25.15e\n"
"Counts:\n",
tm, M, gc, sx, sy);
for (i = 0; i <= NQ-1; i++) {
printf("%3d %15.0f\n", i, q[i]);
}
c_print_results("EP", CLASS, M+1, 0, 0, nit, nthreads,
tm, Mops,
"Random numbers generated",
verified, NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
if (TIMERS_ENABLED == TRUE) {
printf("Total time: %f", timer_read(1));
printf("Gaussian pairs: %f", timer_read(2));
printf("Random numbers: %f", timer_read(3));
}
}
|
conn.c | #include <omp.h>
#include <stdio.h>
#include <sys/mman.h>
#include "q_incs.h"
#include "_mmap.h"
#include "_rdtsc.h"
#define MAX_CHAIN_LENGTH 1024
int
main(
int argc,
char **argv
)
{
int status = 0;
char *infile = NULL;
char *opfile1 = NULL;
char *opfile2 = NULL;
int max_id;
int *new_E = NULL;
char *X = NULL; size_t nX = 0;
int *rep = NULL; // representative
FILE *ofp1 = NULL;
FILE *ofp2 = NULL;
if ( argc != 5 ) { go_BYE(-1); }
infile = argv[1];
opfile1 = argv[2];
opfile2 = argv[3];
if ( strcmp(infile, opfile1) == 0 ) { go_BYE(-1); }
if ( strcmp(opfile1, opfile2) == 0 ) { go_BYE(-1); }
max_id = atoi(argv[4]);
if ( max_id <= 0 ) { go_BYE(-1); }
int nV = max_id + 1;
// Make this a multiple of 2
if ( ( nV % 2 ) != 0 ) { nV++; }
if ( ( nV % 2 ) != 0 ) { go_BYE(-1); }
rep = malloc(nV * sizeof(int));
return_if_malloc_failed(rep);
#pragma omp parallel for
for ( int i = 0; i < nV; i++ ) {
rep[i] = i;
}
//-----------------------
status = rs_mmap(infile, &X, &nX, 0); cBYE(status);
uint64_t nE = nX / sizeof(int);
// Note that nE is a multiple of 2 and TWICE the number of edges
int *E = (int *)X;
uint64_t t_start = RDTSC();
omp_set_num_threads(4);
int nT = omp_get_max_threads(); /* omp_get_num_threads() always returns 1 outside a parallel region */
uint64_t num_changes = 1; // to get into loop
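// Connected components by label propagation: the first inner loop hooks each
// edge's endpoints to the smaller representative; the second compresses
// representative chains so rep[] converges toward each component's minimum id.
// Iterate until a full pass makes no changes.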
for ( int iter = 1; num_changes != 0 ; iter++ ) {
int n_blocks = ( nE / nV ) / 4 ; // TODO: We are going to vary the 4
if ( n_blocks <= 0 ) { n_blocks = 1; } // guard against division by zero below
uint64_t block_size = nE / n_blocks;
uint64_t loop1 = 0, loop2 = 0;
num_changes = 0;
uint64_t l_num_changes = 0;
uint64_t t1;
uint64_t t_start = RDTSC();
uint64_t num_dead_edges = 0;
uint64_t num_exst_edges = 0;
uint64_t num_live_edges = 0;
fprintf(stderr, "Iteration %d \n", iter);
for ( uint64_t b = 0; b < n_blocks; b++ ) {
uint64_t lb = b * block_size;
uint64_t ub = lb + block_size;
if ( b == (n_blocks-1) ) { ub = nE; }
t1 = RDTSC();
for ( uint64_t e = lb; e < ub; e += 2 ) {
num_exst_edges++;
int from = E[e];
int to = E[e+1];
int from_rep = rep[from];
int to_rep = rep[to];
if ( from_rep < to_rep ) {
rep[to] = from_rep;
num_changes++;
num_live_edges++;
}
else if ( to_rep < from_rep ) {
rep[from] = to_rep;
num_changes++;
num_live_edges++;
}
else {
if ( ( from_rep != from ) && ( to_rep != to ) ) {
num_dead_edges++;
}
}
}
loop1 += RDTSC() - t1;
int x_n_blocks = nT;
int x_block_size = nV / x_n_blocks;
t1 = RDTSC();
// #pragma omp parallel for schedule(static, 1)
for ( int xb = 0; xb < x_n_blocks; xb++ ) {
int x_lb = xb * x_block_size;
int x_ub = x_lb + x_block_size;
if ( xb == (x_n_blocks-1) ) { x_ub = nV; }
// fprintf(stderr, "starting jumping loop\n");
for ( int i = x_lb; i < x_ub; i++ ) {
int chain[MAX_CHAIN_LENGTH];
#undef V1
#ifdef V1
int parent = rep[i];
if ( i != parent ) {
int grand_parent = rep[parent];
if ( parent != grand_parent ) {
rep[i] = grand_parent;
l_num_changes++;
}
}
#else
int me = i;
int parent = rep[i];
int chain_idx = 0;
while ( me != parent ) {
if ( chain_idx < MAX_CHAIN_LENGTH ) {
chain[chain_idx++] = parent;
}
me = parent, parent = rep[parent];
}
if ( parent != i ) {
l_num_changes++;
rep[i] = parent;
for ( int j = 0; j < chain_idx; j++ ) {
rep[chain[j]] = parent;
}
}
#endif
}
// fprintf(stderr, "stopping jumping loop\n");
}
loop2 += RDTSC() - t1;
}
fprintf(stderr, "num_dead_edges = %lf \n", (double)num_dead_edges);
fprintf(stderr, "num_live_edges = %lf \n", (double)num_live_edges);
fprintf(stderr, "num_exst_edges = %lf \n", (double)num_exst_edges);
fprintf(stderr, "Time loop1 = %lf \n", (double)loop1);
fprintf(stderr, "Time loop2 = %lf \n", (double)loop2);
fprintf(stderr, "Loop 1 num_changes = %lf \n", (double)num_changes);
fprintf(stderr, "Loop 2 num_changes = %lf \n", (double)l_num_changes);
uint64_t t_stop = RDTSC();
fprintf(stderr, "Time = %lf \n",
(t_stop - t_start)/(2800.0*1000000.0));
fprintf(stderr, "==============================\n");
if ( ( (double)num_live_edges / (double)num_exst_edges ) < 0.1 ) {
fprintf(stderr, "Condensing\n");
free_if_non_null(new_E);
new_E = malloc(num_live_edges * 2 * sizeof(int)); // reuse the outer pointer; a shadowing re-declaration here leaked it
return_if_malloc_failed(new_E);
uint64_t eidx = 0;
uint64_t num_ignore = 0;
for ( uint64_t i = 0; i < nE; i += 2 ) {
int from = E[i];
int to = E[i+1];
int from_rep = rep[from];
int to_rep = rep[to];
if ( (from_rep == to_rep ) && ( from_rep != from ) &&
( to_rep != to ) ) {
/* ignore this edge */
num_ignore++;
}
else {
new_E[eidx++] = from;
new_E[eidx++] = to;
}
}
free_if_non_null(new_E); // TODO Should actually use it
fprintf(stderr, "num_ignore = %lf \n", (double)num_ignore);
fprintf(stderr, "new_nE = %lf \n", (double)eidx);
}
}
uint64_t t_stop = RDTSC();
fprintf(stderr, "Successfully calculated reps in time %lf\n",
(t_stop - t_start)/(2800.0*1000000.0));
ofp1 = fopen(opfile1, "wb");
return_if_fopen_failed(ofp1, opfile1, "wb");
ofp2 = fopen(opfile2, "w");
return_if_fopen_failed(ofp2, opfile2, "w");
for ( int i = 0; i < nV; i++ ) {
if ( rep[i] > 0 ) {
fwrite(&i, sizeof(int), 1, ofp1);
fprintf(ofp2, "%d\n", rep[i]);
/*
fwrite(rep+i, sizeof(int), 1, ofp2);
*/
}
}
fprintf(stderr, "Wrote answer \n");
BYE:
mcr_rs_munmap(X, nX);
free_if_non_null(rep);
free_if_non_null(new_E);
fclose_if_non_null(ofp1);
fclose_if_non_null(ofp2);
return status;
}
|
VectorofVector.h | /*
* VectorofVector.h
*
* Created on: 21/feb/2017
* Author: samuele
*/
#ifndef UTILITIES_VECTOROFVECTOR_H_
#define UTILITIES_VECTOROFVECTOR_H_
#include <vector>
#include <memory>
#include <stdexcept>
#include <functional> // std::reference_wrapper
#include <numeric> // std::accumulate
#include <cassert>
using namespace std;
#define NDEBUG
template<typename T>
class Vector_of_Vector {
struct Row {
size_t start;
size_t size;
};
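// Storage layout: all rows live back-to-back in one flat `data` vector and
// each Row records its (start, size) slice -- e.g. rows of sizes {2, 3}
// occupy data[0..2) and data[2..5). One contiguous allocation, CSR-style.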
public:
typedef T value_type;
typedef reference_wrapper<value_type> reference_value_type;
Vector_of_Vector() {
}
//init matrix
Vector_of_Vector(size_t rows, size_t cols) {
this->resize(rows, cols);
}
Vector_of_Vector(size_t rows, size_t cols, const value_type& val) {
this->resize(rows, cols, val);
}
//init vector of vector with different size
Vector_of_Vector(const vector<size_t> size_rows){
this->resize(size_rows);
}
Vector_of_Vector(const vector<size_t> size_rows, const value_type& val) {
this->resize(size_rows, val);
}
virtual ~Vector_of_Vector() {
}
inline const value_type& get(size_t row, size_t col) const{
// if(col >= this->size_row(row))
// throw out_of_range("out_of_range error: col >= this->size_row(row) -> " + to_string(col) + " >= " + to_string(this->size_row(row)));
return this->data[this->rows[row].start+col];
}
inline value_type& get(size_t row, size_t col) {
// if(col >= this->size_row(row))
// throw out_of_range("out_of_range error: col >= this->size_row(row) -> " + to_string(col) + " >= " + to_string(this->size_row(row)));
return this->data[this->rows[row].start+col];
}
//return only reference for object in matrix,
//insert in this vector not insert in matrix,
//for insert use insert method
inline void get(size_t row, vector<reference_value_type>& result) {
if(row >= this->rows.size())
throw out_of_range("out_of_range error: row >= this->rows.size() -> " + to_string(row) + " >= " + to_string(this->rows.size()));
size_t row_size = this->size_row(row);
result.clear();
result.reserve(row_size);
for(size_t i = this->rows[row].start; i < this->rows[row].start + row_size; ++i)
result.push_back(this->data[i]);
}
inline void insert(size_t row, size_t col, const T& value) {
if(row > this->rows.size())
throw out_of_range("out_of_range error: row > this->rows.size() -> " + to_string(row) + " > " + to_string(this->rows.size()));
if(col > (row == this->rows.size() ? 0:size_row(row)))
throw out_of_range("out_of_range error: col > (row == this->rows.size() ? 0:size_row(row)) -> " + to_string(col) + " > " + to_string((row == this->rows.size() ? 0:size_row(row))));
// std::advance returns void (the original `auto pos_insert = advance(...)`
// did not compile); the offset must come from the row's start, not row*col
size_t pos = (row == this->rows.size()) ? this->data.size() : this->rows[row].start + col;
this->data.insert(this->data.begin() + (ptrdiff_t) pos, value);
if(row == this->rows.size())
{
Row new_row;
new_row.start = this->data.size()-1;
new_row.size = 1;
this->rows.push_back(move(new_row));
}
else
{
++this->rows[row].size;
#pragma omp parallel for
for(size_t i = row+1; i < this->rows.size(); ++i)
++this->rows[i].start;
}
}
inline void insert(size_t row, size_t col, T&& value) {
if(row > this->rows.size())
throw out_of_range("out_of_range error: row > this->rows.size() -> " + to_string(row) + " > " + to_string(this->rows.size()));
if(col > (row == this->rows.size() ? 0:size_row(row)))
throw out_of_range("out_of_range error: col > (row == this->rows.size() ? 0:size_row(row)) -> " + to_string(col) + " > " + to_string((row == this->rows.size() ? 0:size_row(row))));
// same fix as the copy overload; also move the rvalue instead of copying it
size_t pos = (row == this->rows.size()) ? this->data.size() : this->rows[row].start + col;
this->data.insert(this->data.begin() + (ptrdiff_t) pos, move(value));
if(row == this->rows.size())
{
Row new_row;
new_row.start = this->data.size()-1;
new_row.size = 1;
this->rows.push_back(move(new_row));
}
else
{
++this->rows[row].size;
#pragma omp parallel for
for(size_t i = row+1; i < this->rows.size(); ++i)
++this->rows[i].start;
}
}
inline size_t size_row(size_t row) const {
// if(row >= this->rows.size())
// throw out_of_range("out_of_range error: row >= this->rows.size() -> " + to_string(row) + " >= " + to_string(this->rows.size()));
return this->rows[row].size;
}
inline size_t size() const {
return this->data.size();
}
inline void clear() {
this->data.clear();
this->rows.clear();
}
inline void swap(Vector_of_Vector& swap_el) {
this->data.swap(swap_el.data);
this->rows.swap(swap_el.rows);
}
inline void shrink_to_fit() {
this->data.shrink_to_fit();
this->rows.shrink_to_fit();
}
inline void reserve(size_t rows, size_t cols) {
this->data.reserve(rows*cols);
this->rows.reserve(rows);
}
inline void resize(size_t rows, size_t cols) {
this->resize(rows, cols, value_type());
}
inline void resize(size_t rows, size_t cols, const value_type& val) {
this->data.resize(rows*cols, val);
this->rows.resize(rows);
#pragma omp parallel for
for(size_t i = 0; i < this->rows.size(); ++i)
{
this->rows[i].start = i*cols;
this->rows[i].size = cols;
}
}
inline void resize(const vector<size_t> size_rows){
this->resize(size_rows, value_type());
}
inline void resize(const vector<size_t> size_rows, const value_type& val) {
size_t tot = accumulate(size_rows.begin(), size_rows.end(), size_t(0)); // size_t init avoids int overflow
this->data.resize(tot, val);
this->rows.resize(size_rows.size());
#pragma omp parallel for
for(size_t i = 0; i < this->rows.size(); ++i)
this->rows[i].size = size_rows[i];
size_t start_idx = 0;
for(size_t i = 0; i < this->rows.size(); ++i)
{
this->rows[i].start = start_idx;
start_idx += size_rows[i];
}
}
private:
vector<value_type> data;
vector<Row> rows;
};
#endif /* UTILITIES_VECTOROFVECTOR_H_ */
|
extra_data.c | //
// Created by sachetto on 01/10/17.
//
#include "../config/extra_data_config.h"
#include "../config_helpers/config_helpers.h"
#include "../libraries_common/common_data_structures.h"
real* set_commom_schemia_data(struct config *config, uint32_t num_cells, int num_par, size_t *extra_data_size) {
*extra_data_size = sizeof(real)*(num_cells + num_par);
real *extra_data = (real*)malloc(*extra_data_size);
real atpi = 6.8;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, atpi, config->config_data, "atpi");
real Ko = 5.4;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ko, config->config_data, "Ko");
real Ki = 138.3;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ki, config->config_data, "Ki");
real GNa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GNa_multiplicator, config->config_data, "GNa_multiplicator");
real GCaL_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GCaL_multiplicator, config->config_data, "GCaL_multiplicator");
real INaCa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, INaCa_multiplicator, config->config_data, "INaCa_multiplicator");
real Vm_modifier = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Vm_modifier, config->config_data, "Vm_modifier");
extra_data[0] = atpi;
extra_data[1] = Ko;
extra_data[2] = Ki;
extra_data[3] = Vm_modifier;
extra_data[4] = GNa_multiplicator;
extra_data[5] = GCaL_multiplicator;
extra_data[6] = INaCa_multiplicator;
return extra_data;
}
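// Resulting layout (used by every caller below): extra_data[0..6] holds the
// seven parameters in the order written above; callers then append one value
// per cell starting at extra_data[num_par], with 0.0 = fibrotic,
// 1.0 = healthy and intermediate values for the border zone.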
SET_EXTRA_DATA(set_extra_data_for_fibrosis_sphere) {
uint32_t num_active_cells = the_grid->num_active_cells;
struct cell_node ** ac = the_grid->active_cells;
real *fibs = NULL;
real plain_center = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, plain_center, config->config_data, "plain_center");
real border_zone_size = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, border_zone_size, config->config_data, "border_zone_size");
real sphere_radius = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, sphere_radius, config->config_data, "sphere_radius");
int num_par = 7;
fibs = set_commom_schemia_data(config, num_active_cells, num_par, extra_data_size);
#pragma omp parallel for
for (uint32_t i = 0; i < num_active_cells; i++) {
if(FIBROTIC(ac[i])) {
fibs[i+num_par] = 0.0;
}
else if(BORDER_ZONE(ac[i])) {
real center_x = (real)ac[i]->center.x;
real center_y = (real)ac[i]->center.y;
//TODO: Maybe we want the distance from the Z as well
//real center_z = (real)ac[i]->center_z;
real distanceFromCenter = sqrtf((center_x - plain_center)*(center_x - plain_center) + (center_y - plain_center)*(center_y - plain_center));
distanceFromCenter = (distanceFromCenter - sphere_radius)/border_zone_size;
fibs[i+num_par] = distanceFromCenter;
}
else {
fibs[i+num_par] = 1.0;
}
}
return (void*)fibs;
}
SET_EXTRA_DATA(set_extra_data_for_fibrosis_plain) {
uint32_t num_active_cells = the_grid->num_active_cells;
int num_par = 7;
real *fibs = NULL;
fibs = set_commom_schemia_data(config, num_active_cells, num_par, extra_data_size);
for(uint32_t i = num_par; i < num_active_cells + num_par; i++) {
fibs[i] = 0.0;
}
return (void*)fibs;
}
SET_EXTRA_DATA(set_extra_data_for_no_fibrosis) {
uint32_t num_active_cells = the_grid->num_active_cells;
int num_par = 7;
real *fibs = NULL;
fibs = set_commom_schemia_data(config, num_active_cells, num_par, extra_data_size);
for(uint32_t i = num_par; i < num_active_cells + num_par; i++) {
fibs[i] = 1.0;
}
return (void*)fibs;
}
SET_EXTRA_DATA(set_extra_data_for_human_full_mesh) {
uint32_t num_active_cells = the_grid->num_active_cells;
int num_par = 7;
real *fibs = NULL;
fibs = set_commom_schemia_data(config, num_active_cells, num_par, extra_data_size);
struct cell_node ** ac = the_grid->active_cells;
real_cpu small_scar_center_x = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, small_scar_center_x, config->config_data, "small_scar_center_x");
real_cpu small_scar_center_y = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, small_scar_center_y, config->config_data, "small_scar_center_y");
real_cpu small_scar_center_z = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, small_scar_center_z, config->config_data, "small_scar_center_z");
real_cpu big_scar_center_x = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, big_scar_center_x, config->config_data, "big_scar_center_x");
real_cpu big_scar_center_y = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, big_scar_center_y, config->config_data, "big_scar_center_y");
real_cpu big_scar_center_z = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, big_scar_center_z, config->config_data, "big_scar_center_z");
real_cpu bz_size_big = 0;
real_cpu bz_size_small = 0;
real_cpu dist_big = 0;
real_cpu dist_small = 0;
uint32_t i;
bool fibrotic, border_zone;
char scar_type;
//#pragma omp parallel for private(dist_big, dist_small) reduction(max: bz_size_big, bz_size_small)
#pragma omp parallel for private(dist_big, dist_small)
for (i = 0; i < num_active_cells; i++) {
border_zone = BORDER_ZONE(ac[i]);
scar_type = SCAR_TYPE(ac[i]);
if (ac[i]->active && border_zone) {
real_cpu center_x = ac[i]->center.x;
real_cpu center_y = ac[i]->center.y;
real_cpu center_z = ac[i]->center.z;
if(scar_type == 'b') {
dist_big = sqrt((center_x - big_scar_center_x) * (center_x - big_scar_center_x) +
(center_y - big_scar_center_y) * (center_y - big_scar_center_y) +
(center_z - big_scar_center_z) * (center_z - big_scar_center_z));
#pragma omp critical(big)
if (dist_big > bz_size_big) {
bz_size_big = dist_big;
}
}
else if(scar_type == 's') {
dist_small = sqrt((center_x - small_scar_center_x) * (center_x - small_scar_center_x) +
(center_y - small_scar_center_y) * (center_y - small_scar_center_y) +
(center_z - small_scar_center_z) * (center_z - small_scar_center_z));
#pragma omp critical(small)
if (dist_small > bz_size_small) {
bz_size_small = dist_small;
}
}
}
}
#pragma omp parallel for private(dist_big, dist_small)
for (i = 0; i < num_active_cells; i++) {
if (ac[i]->active) {
fibrotic = FIBROTIC(ac[i]);
border_zone = BORDER_ZONE(ac[i]);
scar_type = SCAR_TYPE(ac[i]);
if(fibrotic) {
fibs[i+num_par] = 0.0f;
}
else if (border_zone) {
real_cpu center_x = ac[i]->center.x;
real_cpu center_y = ac[i]->center.y;
real_cpu center_z = ac[i]->center.z;
if(scar_type == 'b') {
dist_big = sqrt((center_x - big_scar_center_x) * (center_x - big_scar_center_x) +
(center_y - big_scar_center_y) * (center_y - big_scar_center_y) +
(center_z - big_scar_center_z) * (center_z - big_scar_center_z));
fibs[i+num_par] = (real)(dist_big / bz_size_big);
}
else if(scar_type == 's') {
dist_small = sqrt((center_x - small_scar_center_x) * (center_x - small_scar_center_x) +
(center_y - small_scar_center_y) * (center_y - small_scar_center_y) +
(center_z - small_scar_center_z) * (center_z - small_scar_center_z));
fibs[i+num_par] = (real)(dist_small / bz_size_small);
}
else {
fibs[i+num_par] = 1.0f;
}
}
}
}
return (void*)fibs;
}
SET_EXTRA_DATA(set_extra_data_for_scar_wedge) {
uint32_t num_active_cells = the_grid->num_active_cells;
real *fibs = NULL;
int num_par = 7;
fibs = set_commom_schemia_data(config, num_active_cells, num_par, extra_data_size);
struct cell_node ** ac = the_grid->active_cells;
char *scar_size;
GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR (scar_size, config->config_data, "scar_size");
uint8_t size_code;
if(strcmp(scar_size, "big") == 0) {
size_code = 0;
}
else if(strcmp(scar_size, "small") == 0) {
size_code = 1;
}
else {
printf("Function: set_extra_data_for_scar_edge, invalid scar size %s. Valid sizes are big or small. Exiting!\n", scar_size);
exit(EXIT_FAILURE);
}
real_cpu scar_center_x;
real_cpu scar_center_y;
real_cpu scar_center_z;
////Fibrosis configuration
//BIG SCAR
if(size_code == 0) {
scar_center_x = 95300;
scar_center_y = 81600;
scar_center_z = 36800;
}
else {
scar_center_x = 52469;
scar_center_y = 83225;
scar_center_z = 24791;
}
real_cpu bz_size = 0.0;
real_cpu dist;
uint32_t i;
bool border_zone, fibrotic;
// #pragma omp parallel for private(dist) reduction(max: bz_size)
#pragma omp parallel for private(dist)
for (i = 0; i < num_active_cells; i++) {
if(ac[i]->active) {
border_zone = BORDER_ZONE(ac[i]);
if(border_zone) {
real_cpu center_x = ac[i]->center.x;
real_cpu center_y = ac[i]->center.y;
real_cpu center_z = ac[i]->center.z;
dist = sqrt((center_x - scar_center_x)*(center_x - scar_center_x) + (center_y - scar_center_y)*(center_y - scar_center_y) + (center_z - scar_center_z)*(center_z - scar_center_z) );
#pragma omp critical
if(dist > bz_size) {
bz_size = dist;
}
}
}
}
#pragma omp parallel for private(dist)
for (i = 0; i < num_active_cells; i++) {
if(ac[i]->active) {
border_zone = BORDER_ZONE(ac[i]);
fibrotic = FIBROTIC(ac[i]);
if(fibrotic) {
fibs[i+num_par] = 0.0;
}
else if(border_zone) {
real_cpu center_x = ac[i]->center.x;
real_cpu center_y = ac[i]->center.y;
real_cpu center_z = ac[i]->center.z;
dist = sqrt((center_x - scar_center_x)*(center_x - scar_center_x) + (center_y - scar_center_y)*(center_y - scar_center_y) + (center_z - scar_center_z)*(center_z - scar_center_z) );
dist = dist/bz_size;
fibs[i + num_par] = (real)dist;
}
else {
fibs[i + num_par] = 1.0f;
}
}
}
return (void*)fibs;
}
SET_EXTRA_DATA(set_extra_data_for_benchmark) {
*extra_data_size = sizeof(real)*19;
real *initial_conditions = (real*)malloc(*extra_data_size);
// Initial conditions // Var Units Initial value
initial_conditions[ 0] = -85.423f; // V; millivolt; -85.423
initial_conditions[ 1] = 0.0165; // Xr1; dimensionless; 0.0165
initial_conditions[ 2] = 0.473; // Xr2; dimensionless; 0.473
initial_conditions[ 3] = 0.0174; // Xs; dimensionless; 0.0174
initial_conditions[ 4] = 0.00165; // m; dimensionless; 0.00165
initial_conditions[ 5] = 0.749; // h; dimensionless; 0.749
initial_conditions[ 6] = 0.6788; // j; dimensionless; 0.6788
initial_conditions[ 7] = 3.288e-5; // d; dimensionless; 3.288e-5
initial_conditions[ 8] = 0.7026; // f; dimensionless; 0.7026
initial_conditions[ 9] = 0.9526; // f2; dimensionless; 0.9526
initial_conditions[10] = 0.9942; // fCass; dimensionless; 0.9942
initial_conditions[11] = 0.999998; // s; dimensionless; 0.999998
initial_conditions[12] = 2.347e-8; // r; dimensionless; 2.347e-8
initial_conditions[13] = 0.000153; // Ca_i; millimolar; 0.000153
initial_conditions[14] = 4.272; // Ca_SR; millimolar; 4.272
initial_conditions[15] = 0.00042; // Ca_ss; millimolar; 0.00042
initial_conditions[16] = 0.8978; // R_prime; dimensionless; 0.8978
initial_conditions[17] = 10.132; // Na_i; millimolar; 10.132
initial_conditions[18] = 138.52; // K_i; millimolar; 138.52
return (void*)initial_conditions;
}
// TODO: Fix this function after the Scientific_reports_Fig4a test
SET_EXTRA_DATA(set_extra_data_for_fibrosis_sphere_atpi_changed) {
uint32_t num_active_cells = the_grid->num_active_cells;
struct cell_node ** ac = the_grid->active_cells;
real plain_center = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, plain_center, config->config_data, "plain_center");
real border_zone_size = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, border_zone_size, config->config_data, "border_zone_size");
real sphere_radius = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, sphere_radius, config->config_data, "sphere_radius");
int num_par = 7;
int num_tt_par = 12;
//num_tt_par = 12 initial conditions of tt3, num_par 7, extra data
*extra_data_size = sizeof(real)*(num_par+num_tt_par+num_active_cells);
real *extra_data = (real*)malloc(*extra_data_size);
real atpi = 6.8;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, atpi, config->config_data, "atpi");
real Ko = 5.4;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ko, config->config_data, "Ko");
real Ki = 138.3;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ki, config->config_data, "Ki");
real GNa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GNa_multiplicator, config->config_data, "GNa_multiplicator");
real GCa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GCa_multiplicator, config->config_data, "GCa_multiplicator");
real INaCa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, INaCa_multiplicator, config->config_data, "INaCa_multiplicator");
real Vm_modifier = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Vm_modifier, config->config_data, "Vm_modifier");
// Extra parameters section
extra_data[0] = atpi;
extra_data[1] = Ko;
extra_data[2] = Ki;
extra_data[3] = Vm_modifier;
extra_data[4] = GNa_multiplicator;
extra_data[5] = GCa_multiplicator;
extra_data[6] = INaCa_multiplicator;
// Extra initial conditions section (atpi = 2.0)
extra_data[7] = -86.315208;
extra_data[8] = 0.001362;
extra_data[9] = 0.773427;
extra_data[10] = 0.717868;
extra_data[11] = 0.001977;
extra_data[12] = 0.003678;
extra_data[13] = 0.585249;
extra_data[14] = 0.987165;
extra_data[15] = 0.999538;
extra_data[16] = 0.000029;
extra_data[17] = 0.000000;
extra_data[18] = 0.482457;
// Fibrotic cells configuration
#pragma omp parallel for
for (uint32_t i = 0; i < num_active_cells; i++) {
if(FIBROTIC(ac[i])) {
extra_data[i+num_par+num_tt_par] = 0.0;
}
else if(BORDER_ZONE(ac[i])) {
real center_x = (real)ac[i]->center.x;
real center_y = (real)ac[i]->center.y;
//TODO: Maybe we want the distance from the Z as well
//real center_z = (real)ac[i]->center_z;
real distanceFromCenter = sqrtf((center_x - plain_center)*(center_x - plain_center) + (center_y - plain_center)*(center_y - plain_center));
distanceFromCenter = (distanceFromCenter - sphere_radius)/border_zone_size;
extra_data[i+num_par+num_tt_par] = distanceFromCenter;
}
else {
extra_data[i+num_par+num_tt_par] = 1.0;
}
}
return (void*)extra_data;
}
SET_EXTRA_DATA(set_extra_data_sensibility_pedro) {
uint32_t num_active_cells = the_grid->num_active_cells; // Pedro sensibility simulations
int num_par = 7;
int num_init_condit = 12;
// num_init_condit = 12 initial conditions of tt3, num_par 7, extra data
*extra_data_size = sizeof(real)*(num_par+num_init_condit+num_active_cells);
real *extra_data = (real*)malloc(*extra_data_size);
real atpi = 6.8;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, atpi, config->config_data, "atpi");
real Ko = 5.4;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ko, config->config_data, "Ko");
real Ki = 138.3;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ki, config->config_data, "Ki");
real Vm_modifier = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Vm_modifier, config->config_data, "Vm_modifier");
real GNa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GNa_multiplicator, config->config_data, "GNa_multiplicator");
real GCaL_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GCaL_multiplicator, config->config_data, "GCaL_multiplicator");
real INaCa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, INaCa_multiplicator, config->config_data, "INaCa_multiplicator");
real sv_0 = -86.2f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_0, config->config_data, "sv_0");
real sv_1 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_1, config->config_data, "sv_1");
real sv_2 = 0.75f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_2, config->config_data, "sv_2");
real sv_3 = 0.75f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_3, config->config_data, "sv_3");
real sv_4 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_4, config->config_data, "sv_4");
real sv_5 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_5, config->config_data, "sv_5");
real sv_6 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_6, config->config_data, "sv_6");
real sv_7 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_7, config->config_data, "sv_7");
real sv_8 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_8, config->config_data, "sv_8");
real sv_9 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_9, config->config_data, "sv_9");
real sv_10 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_10, config->config_data, "sv_10");
real sv_11 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_11, config->config_data, "sv_11");
// Set additional parameters section
extra_data[0] = atpi;
extra_data[1] = Ko;
extra_data[2] = Ki;
extra_data[3] = Vm_modifier;
extra_data[4] = GNa_multiplicator;
extra_data[5] = GCaL_multiplicator;
extra_data[6] = INaCa_multiplicator;
// Set initial conditions section
extra_data[7] = sv_0;
extra_data[8] = sv_1;
extra_data[9] = sv_2;
extra_data[10] = sv_3;
extra_data[11] = sv_4;
extra_data[12] = sv_5;
extra_data[13] = sv_6;
extra_data[14] = sv_7;
extra_data[15] = sv_8;
extra_data[16] = sv_9;
extra_data[17] = sv_10;
extra_data[18] = sv_11;
// Set fibrosis section
bool healthy_cell = false;
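// healthy_cell is hard-coded to false, so every cell below receives the fibrotic factor 0.0.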
for(uint32_t i = num_par+num_init_condit; i < num_active_cells + num_par + num_init_condit; i++)
{
if (healthy_cell)
extra_data[i] = 1.0;
else
extra_data[i] = 0.0;
}
return (void*)extra_data;
}
SET_EXTRA_DATA(set_extra_data_sensibility_brodie) {
uint32_t num_active_cells = the_grid->the_purkinje->num_active_purkinje_cells; // Brodie sensibility simulations
int num_par = 7;
int num_init_condit = 12;
// num_init_condit = 12 initial conditions of tt3, num_par = 7 extra parameters, plus one entry per active cell
*extra_data_size = sizeof(real)*(num_par+num_init_condit+num_active_cells);
real *extra_data = (real*)malloc(*extra_data_size);
real atpi = 6.8;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, atpi, config->config_data, "atpi");
real Ko = 5.4;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ko, config->config_data, "Ko");
real Ki = 138.3;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ki, config->config_data, "Ki");
real Vm_modifier = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Vm_modifier, config->config_data, "Vm_modifier");
real GNa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GNa_multiplicator, config->config_data, "GNa_multiplicator");
real GCaL_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GCaL_multiplicator, config->config_data, "GCaL_multiplicator");
real INaCa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, INaCa_multiplicator, config->config_data, "INaCa_multiplicator");
real sv_0 = -86.2f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_0, config->config_data, "sv_0");
real sv_1 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_1, config->config_data, "sv_1");
real sv_2 = 0.75f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_2, config->config_data, "sv_2");
real sv_3 = 0.75f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_3, config->config_data, "sv_3");
real sv_4 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_4, config->config_data, "sv_4");
real sv_5 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_5, config->config_data, "sv_5");
real sv_6 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_6, config->config_data, "sv_6");
real sv_7 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_7, config->config_data, "sv_7");
real sv_8 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_8, config->config_data, "sv_8");
real sv_9 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_9, config->config_data, "sv_9");
real sv_10 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_10, config->config_data, "sv_10");
real sv_11 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_11, config->config_data, "sv_11");
// Set additional parameters section
extra_data[0] = atpi;
extra_data[1] = Ko;
extra_data[2] = Ki;
extra_data[3] = Vm_modifier;
extra_data[4] = GNa_multiplicator;
extra_data[5] = GCaL_multiplicator;
extra_data[6] = INaCa_multiplicator;
// Set initial conditions section
extra_data[7] = sv_0;
extra_data[8] = sv_1;
extra_data[9] = sv_2;
extra_data[10] = sv_3;
extra_data[11] = sv_4;
extra_data[12] = sv_5;
extra_data[13] = sv_6;
extra_data[14] = sv_7;
extra_data[15] = sv_8;
extra_data[16] = sv_9;
extra_data[17] = sv_10;
extra_data[18] = sv_11;
// Set fibrosis section
bool healthy_cell = false;
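// As above, healthy_cell is fixed to false: every Purkinje cell receives the fibrotic factor 0.0.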
for(uint32_t i = num_par+num_init_condit; i < num_active_cells + num_par + num_init_condit; i++)
{
if (healthy_cell)
extra_data[i] = 1.0;
else
extra_data[i] = 0.0;
}
return (void*)extra_data;
}
// Extra data for percolation fibrosis with circle
SET_EXTRA_DATA(set_extra_data_sensibility_sphere_fibrosis) {
uint32_t num_active_cells = the_grid->num_active_cells;
struct cell_node ** ac = the_grid->active_cells;
//~ int num_par = 7;
//~ int num_init_condit = 12;
int num_par = 14;
int num_init_condit = 24;
// two regions: num_par = 14 (7 parameters per region), num_init_condit = 24 (12 tt3 initial conditions per region), plus one entry per active cell
*extra_data_size = sizeof(real)*(num_par+num_init_condit+num_active_cells);
real *extra_data = (real*)malloc(*extra_data_size);
/*Region 1*/
real atpi = 6.8;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, atpi, config->config_data, "atpi");
real Ko = 5.4;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ko, config->config_data, "Ko");
real Ki = 138.3;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ki, config->config_data, "Ki");
real Vm_modifier = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Vm_modifier, config->config_data, "Vm_modifier");
real GNa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GNa_multiplicator, config->config_data, "GNa_multiplicator");
real GCaL_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GCaL_multiplicator, config->config_data, "GCaL_multiplicator");
real INaCa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, INaCa_multiplicator, config->config_data, "INaCa_multiplicator");
real sv_0 = -86.2f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_0, config->config_data, "sv_0");
real sv_1 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_1, config->config_data, "sv_1");
real sv_2 = 0.75f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_2, config->config_data, "sv_2");
real sv_3 = 0.75f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_3, config->config_data, "sv_3");
real sv_4 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_4, config->config_data, "sv_4");
real sv_5 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_5, config->config_data, "sv_5");
real sv_6 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_6, config->config_data, "sv_6");
real sv_7 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_7, config->config_data, "sv_7");
real sv_8 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_8, config->config_data, "sv_8");
real sv_9 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_9, config->config_data, "sv_9");
real sv_10 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_10, config->config_data, "sv_10");
real sv_11 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_11, config->config_data, "sv_11");
/*Region 2*/
real atpi_2 = 6.8;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, atpi_2, config->config_data, "atpi_2");
real Ko_2 = 5.4;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ko_2, config->config_data, "Ko_2");
real Ki_2 = 138.3;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ki_2, config->config_data, "Ki_2");
real Vm_modifier_2 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Vm_modifier_2, config->config_data, "Vm_modifier_2");
real GNa_multiplicator_2 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GNa_multiplicator_2, config->config_data, "GNa_multiplicator_2");
real GCaL_multiplicator_2 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GCaL_multiplicator_2, config->config_data, "GCaL_multiplicator_2");
real INaCa_multiplicator_2 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, INaCa_multiplicator_2, config->config_data, "INaCa_multiplicator_2");
real sv_0_2 = -86.2f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_0_2, config->config_data, "sv_0_2");
real sv_1_2 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_1_2, config->config_data, "sv_1_2");
real sv_2_2 = 0.75f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_2_2, config->config_data, "sv_2_2");
real sv_3_2 = 0.75f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_3_2, config->config_data, "sv_3_2");
real sv_4_2 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_4_2, config->config_data, "sv_4_2");
real sv_5_2 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_5_2, config->config_data, "sv_5_2");
real sv_6_2 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_6_2, config->config_data, "sv_6_2");
real sv_7_2 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_7_2, config->config_data, "sv_7_2");
real sv_8_2 = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_8_2, config->config_data, "sv_8_2");
real sv_9_2 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_9_2, config->config_data, "sv_9_2");
real sv_10_2 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_10_2, config->config_data, "sv_10_2");
real sv_11_2 = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, sv_11_2, config->config_data, "sv_11_2");
/*Region 1*/
// Set additional parameters section Reg 1
extra_data[0] = atpi;
extra_data[1] = Ko;
extra_data[2] = Ki;
extra_data[3] = Vm_modifier;
extra_data[4] = GNa_multiplicator;
extra_data[5] = GCaL_multiplicator;
extra_data[6] = INaCa_multiplicator;
// Set initial conditions section Reg 1
extra_data[7] = sv_0;
extra_data[8] = sv_1;
extra_data[9] = sv_2;
extra_data[10] = sv_3;
extra_data[11] = sv_4;
extra_data[12] = sv_5;
extra_data[13] = sv_6;
extra_data[14] = sv_7;
extra_data[15] = sv_8;
extra_data[16] = sv_9;
extra_data[17] = sv_10;
extra_data[18] = sv_11;
/*Region 2*/
// Set additional parameters section Reg 2
extra_data[19] = atpi_2;
extra_data[20] = Ko_2;
extra_data[21] = Ki_2;
extra_data[22] = Vm_modifier_2;
extra_data[23] = GNa_multiplicator_2;
extra_data[24] = GCaL_multiplicator_2;
extra_data[25] = INaCa_multiplicator_2;
// Set initial conditions section Reg 2
extra_data[26] = sv_0_2;
extra_data[27] = sv_1_2;
extra_data[28] = sv_2_2;
extra_data[29] = sv_3_2;
extra_data[30] = sv_4_2;
extra_data[31] = sv_5_2;
extra_data[32] = sv_6_2;
extra_data[33] = sv_7_2;
extra_data[34] = sv_8_2;
extra_data[35] = sv_9_2;
extra_data[36] = sv_10_2;
extra_data[37] = sv_11_2;
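// Layout: region 1 occupies extra_data[0..18] (7 parameters + 12 initial
// conditions) and region 2 occupies extra_data[19..37]; the per-cell
// fibrosis factors start at index num_par + num_init_condit (= 38).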
real plain_center = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, plain_center, config->config_data, "plain_center");
real border_zone_size = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, border_zone_size, config->config_data, "border_zone_size");
real sphere_radius = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, sphere_radius, config->config_data, "sphere_radius");
real fib_radius = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, fib_radius, config->config_data, "fibrosis_radius");
#pragma omp parallel for
for (uint32_t i = 0; i < num_active_cells; i++)
{
if(FIBROTIC(ac[i]))
{
extra_data[i+num_par+num_init_condit] = 0.0;
}
else if(BORDER_ZONE(ac[i]))
{
real center_x = (real)ac[i]->center.x;
real center_y = (real)ac[i]->center.y;
//TODO: Maybe we also want the distance along the Z axis
//real center_z = (real)ac[i]->center_z;
real distanceFromCenter = sqrtf((center_x - plain_center)*(center_x - plain_center) + (center_y - plain_center)*(center_y - plain_center));
distanceFromCenter = (distanceFromCenter - sphere_radius)/border_zone_size;
extra_data[i+num_par+num_init_condit] = distanceFromCenter;
}
else
{
extra_data[i+num_par+num_init_condit] = 1.0;
}
}
return (void*)extra_data;
}
|
prepress.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR EEEEE PPPP RRRR EEEEE SSSSS SSSSS %
% P P R R E P P R R E SS SS %
% PPPP RRRR EEE PPPP RRRR EEE SSS SSS %
% P R R E P R R E SS SS %
% P R R EEEEE P R R EEEEE SSSSS SSSSS %
% %
% %
% MagickCore Prepress Methods %
% %
% Software Design %
% Cristy %
% October 2001 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/prepress.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T o t a l I n k D e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageTotalInkDensity() returns the total ink density for a CMYK image.
% Total Ink Density (TID) is determined by adding the CMYK values in the
% darkest shadow area in an image.
%
% The format of the GetImageTotalInkDensity method is:
%
% double GetImageTotalInkDensity(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
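/*
  A minimal usage sketch (illustrative only; it assumes a CMYK Image and an
  ExceptionInfo acquired elsewhere, e.g. via ReadImage() and
  AcquireExceptionInfo()), reporting the density as a percentage of the
  400% CMYK maximum:

    double tid = GetImageTotalInkDensity(image, exception);
    (void) fprintf(stdout, "total ink density: %.2f%%\n",
      100.0*tid/(4.0*(double) QuantumRange));
*/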
MagickExport double GetImageTotalInkDensity(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
double
total_ink_density;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ColorSeparatedImageRequired","`%s'",image->filename);
return(0.0);
}
status=MagickTrue;
total_ink_density=0.0;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
density;
register const Quantum
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
density=(double) GetPixelRed(image,p)+GetPixelGreen(image,p)+
GetPixelBlue(image,p)+GetPixelBlack(image,p);
if (density > total_ink_density)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetImageTotalInkDensity)
#endif
{
if (density > total_ink_density)
total_ink_density=density;
}
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
total_ink_density=0.0;
return(total_ink_density);
}
|
mlp_example_bf16_amx_numa.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Evangelos Georganas, Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <libxsmm_sync.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
#include <numa.h>
/* include c-based dnn library */
#include "../common/dnn_common.h"
#define CHECK_L1
#define OVERWRITE_DOUTPUT_BWDUPD
#define _mm512_load_fil(A) _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepi16_epi32(_mm256_loadu_si256((__m256i*)(A))),16))
#define _mm512_store_fil(A,B) _mm256_storeu_si256((__m256i*)(A), (__m256i)_mm512_cvtneps_pbh((B)))
LIBXSMM_INLINE void my_init_buf(float* buf, size_t size, int initPos, int initOne)
{
int i;
zero_buf(buf, size);
for (i = 0; i < (int)size; ++i) {
buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0)));
}
}
LIBXSMM_INLINE void my_init_buf_bf16(libxsmm_bfloat16* buf, size_t size, int initPos, int initOne)
{
int i;
zero_buf_bf16(buf, size);
for (i = 0; i < (int)size; ++i) {
libxsmm_bfloat16_hp tmp;
tmp.f = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0)));
buf[i] = tmp.i[1];
}
}
#if 0
LIBXSMM_INLINE void my_matrix_copy_KCCK_to_KCCK_vnni(float *src, float *dst, int C, int K, int bc, int bk)
{
int k1, k2, c1, c2;
int kBlocks = K/bk;
int cBlocks = C/bc;
LIBXSMM_VLA_DECL(4, float, real_src, src, cBlocks, bc, bk);
LIBXSMM_VLA_DECL(5, float, real_dst, dst, cBlocks, bc/2, bk, 2);
for (k1 = 0; k1 < kBlocks; k1++) {
for (c1 = 0; c1 < cBlocks; c1++) {
for (c2 = 0; c2 < bc; c2++) {
for (k2 = 0; k2 < bk; k2++) {
LIBXSMM_VLA_ACCESS(5, real_dst, k1, c1, c2/2, k2, c2%2, cBlocks, bc/2, bk, 2) = LIBXSMM_VLA_ACCESS(4, real_src, k1, c1, c2, k2, cBlocks, bc, bk);
}
}
}
}
}
#endif
typedef enum my_eltwise_fuse {
MY_ELTWISE_FUSE_NONE = 0,
MY_ELTWISE_FUSE_BIAS = 1,
MY_ELTWISE_FUSE_RELU = 2,
MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU
} my_eltwise_fuse;
typedef enum my_pass {
MY_PASS_FWD = 1,
MY_PASS_BWD_D = 2,
MY_PASS_BWD_W = 4,
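/* MY_PASS_BWD = MY_PASS_BWD_D | MY_PASS_BWD_W */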
MY_PASS_BWD = 6
} my_pass;
typedef struct my_opt_config {
libxsmm_blasint C;
libxsmm_blasint K;
libxsmm_blasint bc;
libxsmm_blasint bk;
libxsmm_blasint threads;
float lr;
size_t scratch_size;
libxsmm_barrier* barrier;
} my_opt_config;
typedef struct my_smax_fwd_config {
libxsmm_blasint N;
libxsmm_blasint C;
libxsmm_blasint bn;
libxsmm_blasint bc;
libxsmm_blasint threads;
size_t scratch_size;
libxsmm_barrier* barrier;
} my_smax_fwd_config;
typedef struct my_smax_bwd_config {
libxsmm_blasint N;
libxsmm_blasint C;
libxsmm_blasint bn;
libxsmm_blasint bc;
libxsmm_blasint threads;
size_t scratch_size;
float loss_weight;
libxsmm_barrier* barrier;
} my_smax_bwd_config;
typedef struct my_fc_fwd_config {
libxsmm_blasint N;
libxsmm_blasint C;
libxsmm_blasint K;
libxsmm_blasint bn;
libxsmm_blasint bc;
libxsmm_blasint bk;
libxsmm_blasint threads;
my_eltwise_fuse fuse_type;
libxsmm_blasint fwd_bf;
libxsmm_blasint fwd_2d_blocking;
libxsmm_blasint fwd_col_teams;
libxsmm_blasint fwd_row_teams;
size_t scratch_size;
libxsmm_barrier* barrier;
libxsmm_bsmmfunction fwd_config_kernel;
libxsmm_bsmmfunction tilerelease_kernel;
libxsmm_bsmmfunction_reducebatch_strd gemm_fwd;
libxsmm_bsmmfunction_reducebatch_strd gemm_fwd2;
libxsmm_bmmfunction_reducebatch_strd gemm_fwd3;
libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd4;
libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd5;
libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd6;
libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd7;
libxsmm_bmmfunction_reducebatch_strd_meltwfused gemm_fwd8;
libxsmm_meltwfunction_unary fwd_cvtfp32bf16_kernel;
libxsmm_meltwfunction_unary fwd_cvtfp32bf16_relu_kernel;
libxsmm_meltwfunction_unary fwd_sigmoid_cvtfp32bf16_kernel;
libxsmm_meltwfunction_unary fwd_zero_kernel;
libxsmm_meltwfunction_unary fwd_copy_bf16fp32_kernel;
libxsmm_meltwfunction_unary fwd_colbcast_bf16fp32_copy_kernel;
} my_fc_fwd_config;
typedef struct my_fc_bwd_config {
libxsmm_blasint N;
libxsmm_blasint C;
libxsmm_blasint K;
libxsmm_blasint bn;
libxsmm_blasint bc;
libxsmm_blasint bk;
libxsmm_blasint threads;
my_eltwise_fuse fuse_type;
libxsmm_blasint bwd_bf;
libxsmm_blasint bwd_2d_blocking;
libxsmm_blasint bwd_col_teams;
libxsmm_blasint bwd_row_teams;
libxsmm_blasint upd_bf;
libxsmm_blasint upd_2d_blocking;
libxsmm_blasint upd_col_teams;
libxsmm_blasint upd_row_teams;
libxsmm_blasint ifm_subtasks;
libxsmm_blasint ofm_subtasks;
size_t scratch_size;
size_t doutput_scratch_mark;
libxsmm_barrier* barrier;
libxsmm_bsmmfunction bwd_config_kernel;
libxsmm_bsmmfunction upd_config_kernel;
libxsmm_bsmmfunction tilerelease_kernel;
libxsmm_bsmmfunction_reducebatch_strd gemm_bwd;
libxsmm_bsmmfunction_reducebatch_strd gemm_bwd2;
libxsmm_bmmfunction_reducebatch_strd gemm_bwd3;
libxsmm_bsmmfunction_reducebatch_strd gemm_upd;
libxsmm_bsmmfunction_reducebatch_strd gemm_upd2;
libxsmm_bmmfunction_reducebatch_strd gemm_upd3;
libxsmm_meltwfunction_unary bwd_cvtfp32bf16_kernel;
libxsmm_meltwfunction_cvtfp32bf16 upd_cvtfp32bf16_kernel;
libxsmm_meltwfunction_unary bwd_relu_kernel;
libxsmm_meltwfunction_unary bwd_zero_kernel;
libxsmm_meltwfunction_unary upd_zero_kernel;
libxsmm_meltwfunction_unary delbias_reduce_kernel;
libxsmm_meltwfunction_unary vnni_to_vnniT_kernel;
libxsmm_meltwfunction_unary norm_to_normT_kernel;
libxsmm_meltwfunction_unary norm_to_vnni_kernel;
} my_fc_bwd_config;
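/* Per-NUMA-domain thread configuration: [thr_s, thr_e] is the thread range
   pinned to the domain, and the per-layer arrays record the OFm/IFm block
   ranges it owns plus locally allocated fwd/bwd-d/bwd-w scratch replicas
   (a reading inferred from the field names and their use below). */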
typedef struct my_numa_thr_cfg {
int thr_s;
int thr_e;
int *blocksOFm_s;
int *blocksOFm_e;
int *blocksIFm_s;
int *blocksIFm_e;
libxsmm_bfloat16 **scratch;
size_t *layer_size;
libxsmm_bfloat16 **bwd_d_scratch;
size_t *bwd_d_layer_size;
libxsmm_bfloat16 **bwd_w_scratch;
size_t *bwd_w_layer_size;
} my_numa_thr_cfg;
my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn,
libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
my_fc_fwd_config res;
libxsmm_blasint lda = bk;
libxsmm_blasint ldb = bc;
libxsmm_blasint ldc = bk;
libxsmm_blasint ld_zero = bk*bn;
libxsmm_blasint ld_upconvert = K;
float alpha = 1.0f;
float beta = 1.0f;
float zerobeta = 0.0f;
libxsmm_meltw_flags fusion_flags;
int l_flags, l_tc_flags;
int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
libxsmm_blasint unroll_hint;
/* setting up some handle values */
res.N = N;
res.C = C;
res.K = K;
res.bn = bn;
res.bc = bc;
res.bk = bk;
res.threads = threads;
res.fuse_type = fuse_type;
/* setup parallelization strategy */
if (threads == 16) {
res.fwd_bf = 1;
res.fwd_2d_blocking = 1;
res.fwd_col_teams = 2;
res.fwd_row_teams = 8;
} else {
res.fwd_bf = 1;
res.fwd_2d_blocking = 0;
res.fwd_col_teams = 1;
res.fwd_row_teams = 1;
}
#if 0
res.fwd_bf = atoi(getenv("FWD_BF"));
res.fwd_2d_blocking = atoi(getenv("FWD_2D_BLOCKING"));
res.fwd_col_teams = atoi(getenv("FWD_COL_TEAMS"));
res.fwd_row_teams = atoi(getenv("FWD_ROW_TEAMS"));
#endif
/* setting up the barrier */
res.barrier = libxsmm_barrier_create(threads, 1);
/* TPP creation */
l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
unroll_hint = (res.C/res.bc)/res.fwd_bf;
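/* unroll hint: C-blocks each BRGEMM call reduces over once the fwd_bf blocking factor splits the accumulation */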
res.fwd_config_kernel = libxsmm_bsmmdispatch(res.bk, res.bn, res.bc, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL);
if ( res.fwd_config_kernel == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP fwd_config_kernel failed. Bailing...!\n");
exit(-1);
}
res.gemm_fwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL);
if ( res.gemm_fwd == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n");
exit(-1);
}
res.gemm_fwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_fwd2 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. Bailing...!\n");
exit(-1);
}
res.gemm_fwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_fwd3 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd3 failed. Bailing...!\n");
exit(-1);
}
fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_OVERWRITE_C;
res.gemm_fwd4 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
if ( res.gemm_fwd4 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd4 failed. Bailing...!\n");
exit(-1);
}
fusion_flags = LIBXSMM_MELTW_FLAG_ACT_RELU_OVERWRITE_C;
res.gemm_fwd5 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
if ( res.gemm_fwd5 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd5 failed. Bailing...!\n");
exit(-1);
}
fusion_flags = LIBXSMM_MELTW_FLAG_ACT_SIGM_OVERWRITE_C;
res.gemm_fwd6 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
if ( res.gemm_fwd6 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd6 failed. Bailing...!\n");
exit(-1);
}
fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_RELU_OVERWRITE_C;
res.gemm_fwd7 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
if ( res.gemm_fwd7 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd7 failed. Bailing...!\n");
exit(-1);
}
fusion_flags = LIBXSMM_MELTW_FLAG_COLBIAS_ACT_SIGM_OVERWRITE_C;
res.gemm_fwd8 = libxsmm_bmmdispatch_reducebatch_strd_meltwfused_unroll(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL, LIBXSMM_MELTW_OPERATION_COLBIAS_ACT, LIBXSMM_DATATYPE_F32, fusion_flags, 0, 0, 0, 0);
if ( res.gemm_fwd8 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd8 failed. Bailing...!\n");
exit(-1);
}
/* Also JIT eltwise TPPs... */
res.fwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
if ( res.fwd_cvtfp32bf16_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_kernel failed. Bailing...!\n");
exit(-1);
}
res.fwd_cvtfp32bf16_relu_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT, LIBXSMM_MELTW_TYPE_UNARY_RELU);
if ( res.fwd_cvtfp32bf16_relu_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_cvtfp32bf16_relu_kernel failed. Bailing...!\n");
exit(-1);
}
res.fwd_sigmoid_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bk, res.bn, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_SIGMOID);
if ( res.fwd_sigmoid_cvtfp32bf16_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_sigmoid_cvtfp32bf16_kernel failed. Bailing...!\n");
exit(-1);
}
res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL);
if ( res.tilerelease_kernel == NULL ) {
fprintf( stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n");
exit(-1);
}
res.fwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn*bk, 1, &ld_zero, &ld_zero, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
if ( res.fwd_zero_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_zero_kernel failed. Bailing...!\n");
exit(-1);
}
res.fwd_colbcast_bf16fp32_copy_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &ldc, &ldc, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_BCAST_COL, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY );
if ( res.fwd_colbcast_bf16fp32_copy_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_colbcast_bf16fp32_copy_kernel failed. Bailing...!\n");
exit(-1);
}
res.fwd_copy_bf16fp32_kernel = libxsmm_dispatch_meltw_unary(K, 1, &ld_upconvert, &ld_upconvert, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
if ( res.fwd_copy_bf16fp32_kernel == NULL ) {
fprintf( stderr, "JIT for TPP fwd_copy_bf16fp32_kernel failed. Bailing...!\n");
exit(-1);
}
/* init scratch */
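/* either the full K x N f32 output staging area, or per-thread room for one bk x bn f32 block / a K-long upconverted bias, whichever is larger */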
res.scratch_size = sizeof(float) * LIBXSMM_MAX(res.K * res.N, res.threads * LIBXSMM_MAX(res.bk * res.bn, res.K));
return res;
}
my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn,
libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
my_fc_bwd_config res;
libxsmm_blasint lda = bk;
libxsmm_blasint ldb = bc;
libxsmm_blasint ldc = bk;
libxsmm_blasint ld_zero_bwd = bc*bn;
libxsmm_blasint ld_zero_upd = bk;
libxsmm_blasint delbias_K = K;
libxsmm_blasint delbias_N = N;
float alpha = 1.0f;
float beta = 1.0f;
float zerobeta = 0.0f;
libxsmm_blasint updM;
libxsmm_blasint updN;
int l_flags, l_tc_flags;
int l_tr_flags = LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
libxsmm_blasint unroll_hint;
size_t size_bwd_scratch;
size_t size_upd_scratch;
libxsmm_blasint bbk;
libxsmm_blasint bbc;
libxsmm_blasint ldaT = bc;
libxsmm_blasint ldb_orig = bc;
/* setting up some handle values */
res.N = N;
res.C = C;
res.K = K;
res.bn = bn;
res.bc = bc;
res.bk = bk;
res.threads = threads;
res.fuse_type = fuse_type;
/* setup parallelization strategy */
if (threads == 16) {
res.bwd_bf = 1;
res.bwd_2d_blocking = 1;
res.bwd_col_teams = 2;
res.bwd_row_teams = 8;
res.upd_bf = 1;
res.upd_2d_blocking = 0;
res.upd_col_teams = 1;
res.upd_row_teams = 1;
res.ifm_subtasks = 1;
res.ofm_subtasks = 1;
} else {
res.bwd_bf = 1;
res.bwd_2d_blocking = 0;
res.bwd_col_teams = 1;
res.bwd_row_teams = 1;
res.upd_bf = 1;
res.upd_2d_blocking = 0;
res.upd_col_teams = 1;
res.upd_row_teams = 1;
res.ifm_subtasks = 1;
res.ofm_subtasks = 1;
}
bbk = (res.upd_2d_blocking == 1) ? bk : bk/res.ofm_subtasks;
bbc = (res.upd_2d_blocking == 1) ? bc : bc/res.ifm_subtasks;
#if 0
res.bwd_bf = atoi(getenv("BWD_BF"));
res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING"));
res.bwd_col_teams = atoi(getenv("BWD_COL_TEAMS"));
res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS"));
res.upd_bf = atoi(getenv("UPD_BF"));
res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING"));
res.upd_col_teams = atoi(getenv("UPD_COL_TEAMS"));
res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS"));
res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS"));
res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS"));
#endif
/* setting up the barrier */
res.barrier = libxsmm_barrier_create(threads, 1);
/* TPP creation */
/* BWD GEMM */
l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
unroll_hint = (res.K/res.bk)/res.bwd_bf;
res.gemm_bwd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &beta, &l_flags, NULL);
if ( res.gemm_bwd == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n");
exit(-1);
}
res.gemm_bwd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_bwd2 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n");
exit(-1);
}
res.gemm_bwd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(libxsmm_bfloat16), res.bk*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &ldb, &lda, &ldb, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_bwd3 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd3 failed. Bailing...!\n");
exit(-1);
}
res.bwd_config_kernel = libxsmm_bsmmdispatch(res.bc, res.bn, res.bk, &ldb, &lda, &ldb, NULL, &beta, &l_tc_flags, NULL);
if ( res.bwd_config_kernel == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP bwd_config_kernel failed. Bailing...!\n");
exit(-1);
}
/* Also JIT eltwise TPPs... */
res.bwd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn, &ldb, &ldb, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
if ( res.bwd_cvtfp32bf16_kernel == NULL ) {
fprintf( stderr, "JIT for TPP bwd_cvtfp32bf16_kernel failed. Bailing...!\n");
exit(-1);
}
res.bwd_relu_kernel = libxsmm_dispatch_meltw_unary(res.bc, res.bn,&ldb, &ldb, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_BITMASK_2BYTEMULT, LIBXSMM_MELTW_TYPE_UNARY_RELU_INV);
if ( res.bwd_relu_kernel == NULL ) {
fprintf( stderr, "JIT for TPP bwd_relu_kernel failed. Bailing...!\n");
exit(-1);
}
res.bwd_zero_kernel = libxsmm_dispatch_meltw_unary(bn*bc, 1, &ld_zero_bwd, &ld_zero_bwd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
if ( res.bwd_zero_kernel == NULL ) {
fprintf( stderr, "JIT for TPP bwd_zero_kernel failed. Bailing...!\n");
exit(-1);
}
/* JITing the transpose kernel */
res.vnni_to_vnniT_kernel = libxsmm_dispatch_meltw_unary(bk, bc, &lda, &ldaT, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_VNNI_TO_VNNIT);
if ( res.vnni_to_vnniT_kernel == NULL ) {
fprintf( stderr, "JIT for TPP vnni_to_vnniT_kernel failed. Bailing...!\n");
exit(-1);
}
/* UPD GEMM */
lda = res.bk;
ldb = res.bn;
ldc = res.bk;
updM = res.bk/res.ofm_subtasks;
updN = res.bc/res.ifm_subtasks;
l_flags = ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') ) | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG;
l_tc_flags = LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG | ( LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') );
unroll_hint = (res.N/res.bn)/res.upd_bf;
res.gemm_upd = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &beta, &l_flags, NULL);
if ( res.gemm_upd == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_upd failed. Bailing...!\n");
exit(-1);
}
res.gemm_upd2 = libxsmm_bsmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_upd2 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_upd2 failed. Bailing...!\n");
exit(-1);
}
l_flags = l_flags | LIBXSMM_GEMM_FLAG_VNNI_C;
res.gemm_upd3 = libxsmm_bmmdispatch_reducebatch_strd_unroll(updM, updN, res.bn, res.bk*res.bn*sizeof(libxsmm_bfloat16), res.bc*res.bn*sizeof(libxsmm_bfloat16), unroll_hint, &lda, &ldb, &ldc, &alpha, &zerobeta, &l_flags, NULL);
if ( res.gemm_upd3 == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP gemm_upd3 failed. Bailing...!\n");
exit(-1);
}
res.upd_config_kernel = libxsmm_bsmmdispatch(updM, updN, res.bn, &lda, &ldb, &ldc, NULL, &beta, &l_tc_flags, NULL);
if ( res.upd_config_kernel == NULL ) {
fprintf( stderr, "JIT for BRGEMM TPP upd_config_kernel failed. Bailing...!\n");
exit(-1);
}
res.tilerelease_kernel = libxsmm_bsmmdispatch(res.bk, res.bk, res.bk, NULL, NULL, NULL, NULL, NULL, &l_tr_flags, NULL);
if ( res.tilerelease_kernel == NULL ) {
fprintf( stderr, "JIT for TPP tilerelease_kernel failed. Bailing...!\n");
exit(-1);
}
/* Also JIT eltwise TPPs... */
res.upd_cvtfp32bf16_kernel = libxsmm_dispatch_meltw_cvtfp32bf16(bbk, bbc, &ldc, &ldc, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_CVT_VNNI_FORMAT);
if ( res.upd_cvtfp32bf16_kernel == NULL ) {
fprintf( stderr, "JIT for TPP upd_cvtfp32bf16_kernel failed. Bailing...!\n");
exit(-1);
}
res.upd_zero_kernel = libxsmm_dispatch_meltw_unary(bbk, bbc, &ld_zero_upd, &ld_zero_upd, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
if ( res.upd_zero_kernel == NULL ) {
fprintf( stderr, "JIT for TPP upd_zero_kernel failed. Bailing...!\n");
exit(-1);
}
res.delbias_reduce_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &delbias_K, &delbias_N, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD_NCNC_FORMAT);
if( res.delbias_reduce_kernel == NULL ) {
fprintf( stderr, "JIT for TPP delbias_reduce_kernel failed. Bailing...!\n");
exit(-1);
}
/* JITing the transpose kernels */
res.norm_to_vnni_kernel = libxsmm_dispatch_meltw_unary(bk, bn, &lda, &lda, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_VNNI);
if ( res.norm_to_vnni_kernel == NULL ) {
fprintf( stderr, "JIT for TPP norm_to_vnni_kernel failed. Bailing...!\n");
exit(-1);
}
res.norm_to_normT_kernel = libxsmm_dispatch_meltw_unary(bc, bn, &ldb, &ldb_orig, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_DATATYPE_BF16, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_TRANSFORM_NORM_TO_NORMT);
if ( res.norm_to_normT_kernel == NULL ) {
fprintf( stderr, "JIT for TPP norm_to_normT_kernel failed. Bailing...!\n");
exit(-1);
}
/* init scratch */
size_bwd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.N, res.threads * res.bc * res.bn) + sizeof(libxsmm_bfloat16) * res.C * res.K;
size_upd_scratch = sizeof(float) * LIBXSMM_MAX(res.C * res.K, res.threads * res.bc * res.bk) + sizeof(libxsmm_bfloat16) * res.threads * res.bk * res.bc + sizeof(libxsmm_bfloat16) * (res.N * (res.C + res.K));
#ifdef OVERWRITE_DOUTPUT_BWDUPD
res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + sizeof(libxsmm_bfloat16) * res.N * res.K;
#else
res.scratch_size = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch) + 2 * sizeof(libxsmm_bfloat16) * res.N * res.K;
#endif
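/* offset at which the (optionally ReLU-masked) doutput copy begins inside the scratch */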
res.doutput_scratch_mark = LIBXSMM_MAX(size_bwd_scratch, size_upd_scratch);
return res;
}
my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk,
libxsmm_blasint threads, float lr) {
my_opt_config res;
/* setting up some handle values */
res.C = C;
res.K = K;
res.bc = bc;
res.bk = bk;
res.threads = threads;
res.lr = lr;
/* setting up the barrier */
res.barrier = libxsmm_barrier_create(threads, 1);
/* init scratch */
res.scratch_size = 0;
return res;
}
my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc,
libxsmm_blasint threads) {
my_smax_fwd_config res;
/* setting up some handle values */
res.C = C;
res.N = N;
res.bc = bc;
res.bn = bn;
res.threads = threads;
/* setting up the barrier */
res.barrier = libxsmm_barrier_create(threads, 1);
/* init scratch */
res.scratch_size = (sizeof(float)*res.C*res.N*2);
return res;
}
my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc,
libxsmm_blasint threads, float loss_weight) {
my_smax_bwd_config res;
/* setting up some handle values */
res.C = C;
res.N = N;
res.bc = bc;
res.bn = bn;
res.threads = threads;
res.loss_weight = loss_weight;
/* setting up the barrier */
res.barrier = libxsmm_barrier_create(threads, 1);
/* init scratch */
res.scratch_size = (sizeof(float)*res.C*res.N*2);
return res;
}
void my_fc_fwd_exec( my_fc_fwd_config cfg, const libxsmm_bfloat16* wt_ptr, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* out_act_ptr,
const libxsmm_bfloat16* bias_ptr, unsigned char* relu_ptr, int start_tid, int my_tid, void* scratch, my_numa_thr_cfg *numa_thr_cfg, int layer ) {
const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
const libxsmm_blasint bn = cfg.bn;
const libxsmm_blasint bk = cfg.bk;
const libxsmm_blasint lpb = 2;
const libxsmm_blasint bc_lp = cfg.bc/lpb;
/* const libxsmm_blasint bc = cfg.bc;*/
libxsmm_blasint use_2d_blocking = cfg.fwd_2d_blocking;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks that could be run in parallel */
const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
/* compute chunk size */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
/* loop variables */
libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0;
libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk);
LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc);
LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, wt_ptr, nBlocksIFm, bc_lp, cfg.bk, lpb);
LIBXSMM_VLA_DECL(4, float, output_f32, (float*)scratch, nBlocksOFm, bn, bk);
libxsmm_meltw_gemm_param gemm_eltwise_params;
float* fp32_bias_scratch = ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (float*)scratch + ltid * cfg.K : NULL;
LIBXSMM_VLA_DECL(2, const libxsmm_bfloat16, bias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) bias_ptr : NULL, cfg.bk);
LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk/32);
libxsmm_meltwfunction_unary eltwise_kernel_act = cfg.fwd_cvtfp32bf16_relu_kernel;
libxsmm_meltw_unary_param eltwise_params_act;
libxsmm_meltwfunction_unary eltwise_kernel = cfg.fwd_cvtfp32bf16_kernel;
libxsmm_meltw_unary_param eltwise_params;
libxsmm_bmmfunction_reducebatch_strd_meltwfused bf16_batchreduce_kernel_zerobeta_fused_eltwise;
libxsmm_meltw_unary_param copy_params;
unsigned long long blocks = nBlocksIFm;
libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1;
if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) && ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd7;
} else if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd4;
} else if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
bf16_batchreduce_kernel_zerobeta_fused_eltwise = cfg.gemm_fwd5;
} else {
bf16_batchreduce_kernel_zerobeta_fused_eltwise = NULL;
}
BF = cfg.fwd_bf;
CB_BLOCKS = nBlocksIFm/BF;
blocks = CB_BLOCKS;
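/* fwd_bf > 1 splits the nBlocksIFm reduction into BF chunks of CB_BLOCKS blocks, accumulating partial products in the f32 scratch */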
if (use_2d_blocking == 1) {
col_teams = cfg.fwd_col_teams;
row_teams = cfg.fwd_row_teams;
my_row_id = ltid % row_teams;
my_col_id = ltid / row_teams;
N_tasks_per_thread = (nBlocksMB + col_teams-1)/col_teams;
M_tasks_per_thread = (nBlocksOFm + row_teams-1)/row_teams;
my_N_start = LIBXSMM_MIN( my_col_id * N_tasks_per_thread, nBlocksMB);
my_N_end = LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, nBlocksMB);
my_M_start = LIBXSMM_MIN( my_row_id * M_tasks_per_thread, nBlocksOFm);
my_M_end = LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, nBlocksOFm);
}
const libxsmm_blasint ofm_start = numa_thr_cfg->blocksOFm_s[layer];
/* lazy barrier init */
libxsmm_barrier_init(cfg.barrier, ltid);
cfg.fwd_config_kernel(NULL, NULL, NULL);
if (use_2d_blocking == 1) {
if (BF > 1) {
for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
if ( ifm1 == 0 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk);
cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
} else {
copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
cfg.fwd_zero_kernel(&copy_params);
}
}
cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
if ( ifm1 == BF-1 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
eltwise_kernel_act(&eltwise_params_act);
} else {
eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_kernel(&eltwise_params);
}
}
}
}
}
} else {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, 0, 0,cfg.bk);
copy_params.out.primary = fp32_bias_scratch;
cfg.fwd_copy_bf16fp32_kernel(&copy_params);
}
for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
if ( ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
gemm_eltwise_params.bias_ptr = (float*) fp32_bias_scratch + ofm1 * cfg.bk;
}
if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
}
bf16_batchreduce_kernel_zerobeta_fused_eltwise( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
} else {
cfg.gemm_fwd3( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
}
}
}
}
} else {
if (BF > 1) {
for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
mb1 = mb1ofm1%nBlocksMB;
ofm1 = mb1ofm1/nBlocksMB;
/* Initialize intermediate f32 tensor */
if ( ifm1 == 0 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, ofm1, 0,cfg.bk);
copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm,cfg.bn,cfg.bk);
cfg.fwd_colbcast_bf16fp32_copy_kernel(&copy_params);
} else {
copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
cfg.fwd_zero_kernel(&copy_params);
}
}
cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
if ( ifm1 == BF-1 ) {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
eltwise_params_act.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params_act.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params_act.out.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
eltwise_kernel_act(&eltwise_params_act);
} else {
eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, output_f32, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
eltwise_kernel(&eltwise_params);
}
}
}
}
} else {
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
copy_params.in.primary = (void*) &LIBXSMM_VLA_ACCESS(2, bias, 0, 0,cfg.bk);
copy_params.out.primary = fp32_bias_scratch;
cfg.fwd_copy_bf16fp32_kernel(&copy_params);
}
for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
mb1 = mb1ofm1%nBlocksMB;
ofm1 = mb1ofm1/nBlocksMB;
if ( ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) || ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU )) {
if ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) {
gemm_eltwise_params.bias_ptr = (float*) fp32_bias_scratch + ofm1 * cfg.bk;
}
if ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) {
gemm_eltwise_params.out_ptr = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
}
bf16_batchreduce_kernel_zerobeta_fused_eltwise( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks, &gemm_eltwise_params);
} else {
cfg.gemm_fwd3( &LIBXSMM_VLA_ACCESS(5, filter, ofm1-ofm_start, 0, 0, 0, 0, nBlocksIFm, bc_lp, cfg.bk, lpb),
&LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
&LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk), &blocks);
}
}
}
}
cfg.tilerelease_kernel(NULL, NULL, NULL);
libxsmm_barrier_wait(cfg.barrier, ltid);
}
void my_fc_bwd_exec( my_fc_bwd_config cfg, const libxsmm_bfloat16* wt_ptr, libxsmm_bfloat16* din_act_ptr,
const libxsmm_bfloat16* dout_act_ptr, libxsmm_bfloat16* dwt_ptr, const libxsmm_bfloat16* in_act_ptr,
libxsmm_bfloat16* dbias_ptr, const unsigned char* relu_ptr, my_pass pass, int start_tid, int my_tid, void* scratch ) {
/* size variables, all const */
/* here we assume that input and output blocking is similar */
const libxsmm_blasint bn = cfg.bn;
const libxsmm_blasint bk = cfg.bk;
const libxsmm_blasint bc = cfg.bc;
libxsmm_blasint lpb = 2;
const libxsmm_blasint bc_lp = bc/lpb;
const libxsmm_blasint bk_lp = bk/lpb;
const libxsmm_blasint bn_lp = bn/lpb;
const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ofm2 = 0;
libxsmm_blasint performed_doutput_transpose = 0;
libxsmm_meltw_unary_param trans_param;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks for transpose that could be run in parallel */
const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB;
/* compute chunk size */
const libxsmm_blasint eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ? (ltid * eltwise_chunksize) : eltwise_work;
const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ? ((ltid + 1) * eltwise_chunksize) : eltwise_work;
/* number of tasks for transpose that could be run in parallel */
const libxsmm_blasint dbias_work = nBlocksOFm;
/* compute chunk size */
const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work;
const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? ((ltid + 1) * dbias_chunksize) : dbias_work;
LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, dbias, ((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS) ? (libxsmm_bfloat16*) dbias_ptr : NULL, cfg.bk);
LIBXSMM_VLA_DECL(4, __mmask32, relubitmask, ((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? (__mmask32*)relu_ptr : NULL, nBlocksOFm, cfg.bn, cfg.bk/32);
#ifdef OVERWRITE_DOUTPUT_BWDUPD
libxsmm_bfloat16 *grad_output_ptr = (libxsmm_bfloat16*)dout_act_ptr;
libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16*)((char*)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16*)scratch;
#else
libxsmm_bfloat16 *grad_output_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16*)((char*)scratch + cfg.doutput_scratch_mark) : (libxsmm_bfloat16*)dout_act_ptr;
libxsmm_bfloat16 *tr_doutput_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) ? (libxsmm_bfloat16*)grad_output_ptr + cfg.N * cfg.K : (libxsmm_bfloat16*)scratch;
#endif
LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, doutput_orig, (libxsmm_bfloat16*)dout_act_ptr, nBlocksOFm, bn, bk);
libxsmm_meltw_unary_param relu_params;
libxsmm_meltwfunction_unary relu_kernel = cfg.bwd_relu_kernel;
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, doutput, grad_output_ptr, nBlocksOFm, bn, bk);
LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, doutput_tr, tr_doutput_ptr, nBlocksMB, bn_lp, bk, lpb);
libxsmm_meltwfunction_unary eltwise_kernel = cfg.bwd_cvtfp32bf16_kernel;
libxsmm_meltwfunction_cvtfp32bf16 eltwise_kernel2 = cfg.upd_cvtfp32bf16_kernel;
libxsmm_meltw_unary_param eltwise_params;
libxsmm_meltw_unary_param copy_params;
libxsmm_meltw_unary_param delbias_params;
/* lazy barrier init */
libxsmm_barrier_init(cfg.barrier, ltid);
cfg.bwd_config_kernel(NULL, NULL, NULL);
/* Apply to doutput potential fusions */
if (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU)) {
for ( mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1 ) {
mb1 = mb1ofm1/nBlocksOFm;
ofm1 = mb1ofm1%nBlocksOFm;
relu_params.in.primary =(void*) &LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
relu_params.out.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
relu_params.in.secondary = &LIBXSMM_VLA_ACCESS(4, relubitmask, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk/32);
relu_kernel(&relu_params);
/* If in UPD pass, also perform transpose of doutput */
if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) {
trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk);
trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb);
cfg.norm_to_vnni_kernel(&trans_param);
}
}
if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) {
performed_doutput_transpose = 1;
}
libxsmm_barrier_wait(cfg.barrier, ltid);
}
/* Accumulation of bias happens in f32 */
if (((cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS)) {
for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) {
delbias_params.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk);
delbias_params.out.primary = &LIBXSMM_VLA_ACCESS(2, dbias, ofm1, 0, cfg.bk);
cfg.delbias_reduce_kernel(&delbias_params);
}
/* wait for eltwise to finish */
libxsmm_barrier_wait(cfg.barrier, ltid);
}
if ( (pass & MY_PASS_BWD_D) == MY_PASS_BWD_D ){
libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking;
/* number of tasks that could be run in parallel */
const libxsmm_blasint work = nBlocksIFm * nBlocksMB;
/* compute chunk size */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
/* number of tasks for transpose that could be run in parallel */
const libxsmm_blasint transpose_work = nBlocksIFm * nBlocksOFm;
/* compute chunk size */
const libxsmm_blasint transpose_chunksize = (transpose_work % cfg.threads == 0) ? (transpose_work / cfg.threads) : ((transpose_work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? (ltid * transpose_chunksize) : transpose_work;
const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ? ((ltid + 1) * transpose_chunksize) : transpose_work;
/* loop variables */
libxsmm_blasint ifm1 = 0, ifm1ofm1 = 0, mb1ifm1 = 0;
libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
LIBXSMM_VLA_DECL(5, const libxsmm_bfloat16, filter, (libxsmm_bfloat16*)wt_ptr, nBlocksIFm, bc_lp, bk, lpb);
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dinput, (libxsmm_bfloat16* )din_act_ptr, nBlocksIFm, bn, bc);
LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, filter_tr, (libxsmm_bfloat16*)scratch, nBlocksOFm, bk_lp, bc, lpb);
float* temp_output = (float*)scratch + (cfg.C * cfg.K)/2;
LIBXSMM_VLA_DECL(4, float, dinput_f32, (float*) temp_output, nBlocksIFm, bn, bc);
unsigned long long blocks = nBlocksOFm;
libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1;
BF = cfg.bwd_bf;
KB_BLOCKS = nBlocksOFm/BF;
blocks = KB_BLOCKS;
if (use_2d_blocking == 1) {
col_teams = cfg.bwd_col_teams;
row_teams = cfg.bwd_row_teams;
my_row_id = ltid % row_teams;
my_col_id = ltid / row_teams;
N_tasks_per_thread = (nBlocksMB + col_teams-1)/col_teams;
M_tasks_per_thread = (nBlocksIFm + row_teams-1)/row_teams;
my_N_start = LIBXSMM_MIN( my_col_id * N_tasks_per_thread, nBlocksMB);
my_N_end = LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, nBlocksMB);
my_M_start = LIBXSMM_MIN( my_row_id * M_tasks_per_thread, nBlocksIFm);
my_M_end = LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, nBlocksIFm);
}
/* transpose weight */
for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) {
ofm1 = ifm1ofm1 / nBlocksIFm;
ifm1 = ifm1ofm1 % nBlocksIFm;
trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(5, filter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb);
cfg.vnni_to_vnniT_kernel(&trans_param);
}
/* wait for transpose to finish */
libxsmm_barrier_wait(cfg.barrier, ltid);
if (use_2d_blocking == 1) {
if (BF > 1) {
for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) {
for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) {
for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
/* Initialize intermediate f32 tensor */
if ( ofm1 == 0 ) {
copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
cfg.bwd_zero_kernel(©_params);
}
cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
/* downconvert intermediate f32 tensor to bf16 and store to final C */
if ( ofm1 == BF-1 ) {
eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
eltwise_kernel(&eltwise_params);
}
}
}
}
} else {
for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) {
for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
}
}
}
} else {
if (BF > 1) {
for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) {
for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) {
mb1 = mb1ifm1%nBlocksMB;
ifm1 = mb1ifm1/nBlocksMB;
/* Initialize intermediate f32 tensor */
if ( ofm1 == 0 ) {
copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
cfg.bwd_zero_kernel(©_params);
}
cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
/* downconvert intermediate f32 tensor to bf16 and store to final C */
if ( ofm1 == BF-1 ) {
eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dinput_f32, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
eltwise_kernel(&eltwise_params);
}
}
}
} else {
for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) {
mb1 = mb1ifm1%nBlocksMB;
ifm1 = mb1ifm1/nBlocksMB;
cfg.gemm_bwd3( &LIBXSMM_VLA_ACCESS(5, filter_tr, ifm1, 0, 0, 0, 0, nBlocksOFm, bk_lp, bc, lpb),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
}
}
}
libxsmm_barrier_wait(cfg.barrier, ltid);
}
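/* Note: BF (= cfg.bwd_bf) blocks the reduction over the output-feature
dimension into KB_BLOCKS-sized chunks; for BF > 1 the partial GEMM results
accumulate in the f32 scratch tensor dinput_f32 and only the last chunk
(ofm1 == BF-1) is downconverted to bf16, so no precision is lost between
partial sums. */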
if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) {
/* number of tasks that could be run in parallel */
const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ofm_subtasks;
const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks;
const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk/ofm_subtasks;
const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? bc : bc/ifm_subtasks;
const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks;
const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks;
const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks;
/* 2D blocking parameters */
libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking;
libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
/* compute chunk size */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
libxsmm_blasint BF = cfg.upd_bf;
/* loop variables */
libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, bfn = 0, mb1ifm1 = 0;
/* Batch reduce related variables */
unsigned long long blocks = nBlocksMB/BF;
LIBXSMM_VLA_DECL(4, const libxsmm_bfloat16, input, (libxsmm_bfloat16* )in_act_ptr, nBlocksIFm, bn, bc);
LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dfilter, (libxsmm_bfloat16*)dwt_ptr, nBlocksIFm, bc_lp, bk, lpb);
/* Set up tensors for transposing/scratch before vnni reformatting dfilter */
libxsmm_bfloat16 *tr_inp_ptr = (libxsmm_bfloat16*) ((libxsmm_bfloat16*)scratch + cfg.N * cfg.K);
float *dfilter_f32_ptr = (float*) ((libxsmm_bfloat16*)tr_inp_ptr + cfg.N * cfg.C);
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, input_tr, (libxsmm_bfloat16*)tr_inp_ptr, nBlocksMB, bc, bn);
LIBXSMM_VLA_DECL(4, float, dfilter_f32, (float*)dfilter_f32_ptr, nBlocksIFm, bc, bk);
const libxsmm_blasint tr_out_work = nBlocksMB * nBlocksOFm;
const libxsmm_blasint tr_out_chunksize = (tr_out_work % cfg.threads == 0) ? (tr_out_work / cfg.threads) : ((tr_out_work / cfg.threads) + 1);
const libxsmm_blasint tr_out_thr_begin = (ltid * tr_out_chunksize < tr_out_work) ? (ltid * tr_out_chunksize) : tr_out_work;
const libxsmm_blasint tr_out_thr_end = ((ltid + 1) * tr_out_chunksize < tr_out_work) ? ((ltid + 1) * tr_out_chunksize) : tr_out_work;
const libxsmm_blasint tr_inp_work = nBlocksMB * nBlocksIFm;
const libxsmm_blasint tr_inp_chunksize = (tr_inp_work % cfg.threads == 0) ? (tr_inp_work / cfg.threads) : ((tr_inp_work / cfg.threads) + 1);
const libxsmm_blasint tr_inp_thr_begin = (ltid * tr_inp_chunksize < tr_inp_work) ? (ltid * tr_inp_chunksize) : tr_inp_work;
const libxsmm_blasint tr_inp_thr_end = ((ltid + 1) * tr_inp_chunksize < tr_inp_work) ? ((ltid + 1) * tr_inp_chunksize) : tr_inp_work;
if (use_2d_blocking == 1) {
col_teams = cfg.upd_col_teams;
row_teams = cfg.upd_row_teams;
my_row_id = ltid % row_teams;
my_col_id = ltid / row_teams;
N_tasks_per_thread = (nBlocksIFm + col_teams-1)/col_teams;
M_tasks_per_thread = (nBlocksOFm + row_teams-1)/row_teams;
my_N_start = LIBXSMM_MIN( my_col_id * N_tasks_per_thread, nBlocksIFm);
my_N_end = LIBXSMM_MIN( (my_col_id+1) * N_tasks_per_thread, nBlocksIFm);
my_M_start = LIBXSMM_MIN( my_row_id * M_tasks_per_thread, nBlocksOFm);
my_M_end = LIBXSMM_MIN( (my_row_id+1) * M_tasks_per_thread, nBlocksOFm);
}
/* Required upfront transposes */
for (mb1ifm1 = tr_inp_thr_begin; mb1ifm1 < tr_inp_thr_end; mb1ifm1++) {
mb1 = mb1ifm1%nBlocksMB;
ifm1 = mb1ifm1/nBlocksMB;
trans_param.in.primary = (void*)&LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc);
trans_param.out.primary = &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, mb1, 0, 0, nBlocksMB, bc, bn);
cfg.norm_to_normT_kernel(&trans_param);
}
if (performed_doutput_transpose == 0) {
for (mb1ofm1 = tr_out_thr_begin; mb1ofm1 < tr_out_thr_end; mb1ofm1++) {
mb1 = mb1ofm1%nBlocksMB;
ofm1 = mb1ofm1/nBlocksMB;
trans_param.in.primary = &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, 0, 0, nBlocksOFm, bn, bk);
trans_param.out.primary = &LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, mb1, 0, 0, 0, nBlocksMB, bn_lp, bk, lpb);
cfg.norm_to_vnni_kernel(&trans_param);
}
}
libxsmm_barrier_wait(cfg.barrier, ltid);
if (use_2d_blocking == 1) {
ifm2 = 0;
ofm2 = 0;
if (BF == 1) {
for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) {
cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks);
}
}
} else {
for (bfn = 0; bfn < BF; bfn++) {
for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) {
/* initialize current work task to zero */
if (bfn == 0) {
copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk);
cfg.upd_zero_kernel(©_params);
}
cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn*blocks, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks);
/* Downconvert result to BF16 and vnni format */
if (bfn == BF-1) {
eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk);
eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, 0, 0, 0, nBlocksIFm, bc_lp, bk, lpb);
eltwise_kernel2(&eltwise_params);
}
}
}
}
}
} else {
if (BF == 1) {
for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) {
ofm1 = ifm1ofm1 / Cck_work;
ofm2 = (ifm1ofm1 % Cck_work) / Cc_work;
ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks;
ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks;
cfg.gemm_upd3(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, 0, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, 0, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb), &blocks);
}
} else {
for (bfn = 0; bfn < BF; bfn++) {
for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) {
ofm1 = ifm1ofm1 / Cck_work;
ofm2 = (ifm1ofm1 % Cck_work) / Cc_work;
ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks;
ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks;
/* initialize current work task to zero */
if (bfn == 0) {
copy_params.out.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk);
cfg.upd_zero_kernel(©_params);
}
cfg.gemm_upd(&LIBXSMM_VLA_ACCESS(5, doutput_tr, ofm1, bfn*blocks, 0, ofm2*bbk, 0, nBlocksMB, bn_lp, bk, lpb), &LIBXSMM_VLA_ACCESS(4, input_tr, ifm1, bfn*blocks, ifm2*bbc, 0, nBlocksMB, bc, bn), &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks);
/* Downconvert result to BF16 and vnni format */
if (bfn == BF-1) {
eltwise_params.in.primary = &LIBXSMM_VLA_ACCESS(4, dfilter_f32, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk);
eltwise_params.out.primary = &LIBXSMM_VLA_ACCESS(5, dfilter, ofm1, ifm1, (ifm2*bbc)/lpb, ofm2*bbk, 0, nBlocksIFm, bc_lp, bk, lpb);
eltwise_kernel2(&eltwise_params);
}
}
}
}
}
libxsmm_barrier_wait(cfg.barrier, ltid);
}
cfg.tilerelease_kernel(NULL, NULL, NULL);
}
void my_opt_exec( my_opt_config cfg, libxsmm_bfloat16* wt_ptr, float* master_wt_ptr, const libxsmm_bfloat16* delwt_ptr, int start_tid, int my_tid, void* scratch ) {
/* loop counters */
libxsmm_blasint i;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks that could run in parallel for the filters */
const libxsmm_blasint work = cfg.C * cfg.K;
/* compute chunk size */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
/* lazy barrier init */
libxsmm_barrier_init( cfg.barrier, ltid );
#if defined(__AVX512BW__)
libxsmm_blasint iv = ( (thr_end-thr_begin)/16 ) * 16; /* compute iterations which are vectorizable */
__m512 vlr = _mm512_set1_ps( cfg.lr );
for ( i = thr_begin; i < thr_begin+iv; i+=16 ) {
__m512 newfilter = _mm512_sub_ps( _mm512_loadu_ps( master_wt_ptr+i ), _mm512_mul_ps( vlr, _mm512_load_fil( delwt_ptr + i ) ) );
_mm512_store_fil( wt_ptr+i, newfilter );
_mm512_storeu_ps( master_wt_ptr+i, newfilter );
}
for ( i = thr_begin+iv; i < thr_end; ++i ) {
libxsmm_bfloat16_hp t1, t2;
t1.i[0] =0;
t1.i[1] = delwt_ptr[i];
master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f);
t2.f = master_wt_ptr[i];
wt_ptr[i] = t2.i[1];
}
#else
for ( i = thr_begin; i < thr_end; ++i ) {
libxsmm_bfloat16_hp t1, t2;
t1.i[0] =0;
t1.i[1] = delwt_ptr[i];
master_wt_ptr[i] = master_wt_ptr[i] - (cfg.lr*t1.f);
t2.f = master_wt_ptr[i];
wt_ptr[i] = t2.i[1];
}
#endif
libxsmm_barrier_wait( cfg.barrier, ltid );
}
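/* Illustrative sketch (compiled out; assumes the little-endian layout of the
libxsmm_bfloat16_hp union used above): the scalar SGD path converts by viewing
a float as two 16-bit halves, where i[1] holds sign, exponent and the top 7
mantissa bits, i.e. the truncated bf16 value. */
#if 0
static libxsmm_bfloat16 f32_to_bf16_trunc( float f ) {
libxsmm_bfloat16_hp u;
u.f = f;
return u.i[1]; /* keep the upper 16 bits of the f32 pattern */
}
static float bf16_to_f32( libxsmm_bfloat16 b ) {
libxsmm_bfloat16_hp u;
u.i[0] = 0; /* zero the discarded low mantissa bits */
u.i[1] = b;
return u.f;
}
#endif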
void my_smax_fwd_exec( my_smax_fwd_config cfg, const libxsmm_bfloat16* in_act_ptr, libxsmm_bfloat16* out_act_ptr, const int* label_ptr, float* loss, int start_tid, int my_tid, void* scratch ) {
libxsmm_blasint bn = cfg.bn;
libxsmm_blasint Bn = cfg.N/cfg.bn;
libxsmm_blasint bc = cfg.bc;
libxsmm_blasint Bc = cfg.C/cfg.bc;
/* loop counters */
libxsmm_blasint i = 0;
libxsmm_blasint img1, img2, ifm1, ifm2;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks that could run in parallel for the batch */
const libxsmm_blasint n_work = Bn * bn;
/* compute chunk size */
const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work;
const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work;
/* number of tasks that could run in parallel for the batch */
const libxsmm_blasint nc_work = Bn * bn;
/* compute chunk size */
const libxsmm_blasint nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work;
const libxsmm_blasint nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work;
libxsmm_bfloat16* poutput_bf16 = out_act_ptr;
const libxsmm_bfloat16* pinput_bf16 = in_act_ptr;
float* poutput_fp32 = (float*)scratch;
float* pinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C);
LIBXSMM_VLA_DECL(4, float, output, poutput_fp32, Bc, bn, bc);
LIBXSMM_VLA_DECL(4, const float, input, pinput_fp32, Bc, bn, bc);
LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn);
/* lazy barrier init */
libxsmm_barrier_init( cfg.barrier, ltid );
for ( i = nc_thr_begin; i < nc_thr_end; ++i ) {
libxsmm_bfloat16_hp in;
in.i[0] = 0;
in.i[1] = pinput_bf16[i];
pinput_fp32[i] = in.f;
}
libxsmm_barrier_wait( cfg.barrier, ltid );
for ( i = n_thr_begin; i < n_thr_end; ++i ) {
float max = -FLT_MAX; /* lowest float, so the first input always wins the max scan */
float sum_of_exp = 0.0f;
img1 = i/bn;
img2 = i%bn;
/* copy input to output and compute the max per image */
for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc );
if ( LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ) > max ) {
max = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc );
}
}
}
/* sum exp over outputs */
for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = (float)exp( (double)(LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - max) );
sum_of_exp += LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc );
}
}
/* scale output */
sum_of_exp = 1.0f/sum_of_exp;
for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * sum_of_exp;
}
}
}
libxsmm_barrier_wait( cfg.barrier, ltid );
/* calculate loss single threaded */
if ( ltid == 0 ) {
(*loss) = 0.0f;
for ( img1 = 0; img1 < Bn; ++img1 ) {
for ( img2 = 0; img2 <bn; ++img2 ) {
libxsmm_blasint ifm = (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn );
libxsmm_blasint ifm1b = ifm/bc;
libxsmm_blasint ifm2b = ifm%bc;
float val = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) > FLT_MIN ) ? LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) : FLT_MIN;
*loss += LIBXSMM_LOGF( val ); /* accumulate; averaged over cfg.N below */
}
}
*loss = ((-1.0f)*(*loss))/cfg.N;
}
libxsmm_barrier_wait( cfg.barrier, ltid );
for ( i = nc_thr_begin; i < nc_thr_end; ++i ) {
libxsmm_bfloat16_hp in;
in.f = poutput_fp32[i];
poutput_bf16[i] = in.i[1];
}
libxsmm_barrier_wait( cfg.barrier, ltid );
}
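/* Note: the kernel above computes the numerically stable softmax per image,
p_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)),
followed by the average negative log-likelihood
loss = -(1/N) * sum_n log p_n[label_n],
which is what the accumulation over (img1, img2) implements. */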
void my_smax_bwd_exec( my_smax_bwd_config cfg, libxsmm_bfloat16* delin_act_ptr, const libxsmm_bfloat16* out_act_ptr, const int* label_ptr, int start_tid, int my_tid, void* scratch ) {
libxsmm_blasint bn = cfg.bn;
libxsmm_blasint Bn = cfg.N/cfg.bn;
libxsmm_blasint bc = cfg.bc;
libxsmm_blasint Bc = cfg.C/cfg.bc;
/* loop counters */
libxsmm_blasint i = 0;
libxsmm_blasint img1, img2, ifm1, ifm2;
float rcp_N = 1.0f/cfg.N;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks that could run in parallel for the batch */
const libxsmm_blasint n_work = Bn * bn;
/* compute chunk size */
const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work;
const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work;
/* number of tasks that could run in parallel for the batch */
const int nc_work = Bn * bn;
/* compute chunk size */
const int nc_chunksize = (nc_work % cfg.threads == 0) ? (nc_work / cfg.threads) : ((nc_work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const int nc_thr_begin = (ltid * nc_chunksize < nc_work) ? (ltid * nc_chunksize) : nc_work;
const int nc_thr_end = ((ltid + 1) * nc_chunksize < nc_work) ? ((ltid + 1) * nc_chunksize) : nc_work;
const libxsmm_bfloat16* poutput_bf16 = out_act_ptr;
libxsmm_bfloat16* pdinput_bf16 = delin_act_ptr;
float* poutput_fp32 = (float*)scratch;
float* pdinput_fp32 = ((float*)scratch)+(cfg.N*cfg.C);
LIBXSMM_VLA_DECL(4, const float, output, poutput_fp32, Bc, bn, bc);
LIBXSMM_VLA_DECL(4, float, dinput, pdinput_fp32, Bc, bn, bc);
LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn);
/* lazy barrier init */
libxsmm_barrier_init( cfg.barrier, ltid );
for ( i = nc_thr_begin; i < nc_thr_end; ++i ) {
libxsmm_bfloat16_hp out;
out.i[0] = 0;
out.i[1] = poutput_bf16[i];
poutput_fp32[i] = out.f;
}
libxsmm_barrier_wait( cfg.barrier, ltid );
for ( i = n_thr_begin; i < n_thr_end; ++i ) {
img1 = i/bn;
img2 = i%bn;
/* softmax gradient: (p - 1) at the label entry, p elsewhere, scaled by loss_weight/N */
for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
if ( (ifm1*bc)+ifm2 == (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ) ) { /* flat feature index uses the block size bc */
LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) =
( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - 1.0f ) * rcp_N * cfg.loss_weight;
} else {
LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) =
LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * rcp_N * cfg.loss_weight;
}
}
}
}
libxsmm_barrier_wait( cfg.barrier, ltid );
for ( i = nc_thr_begin; i < nc_thr_end; ++i ) {
libxsmm_bfloat16_hp in;
in.f = pdinput_fp32[i];
pdinput_bf16[i] = in.i[1];
}
libxsmm_barrier_wait( cfg.barrier, ltid );
}
void *numa_alloc_onnode_aligned(size_t size, int numa_node, int alignment_) {
#if 0
int alignment = alignment_ - 1;
size_t adj_size = sizeof(size_t) + alignment;
void *r_ptr = NULL;
void *t_ptr = numa_alloc_onnode(size + adj_size, numa_node);
if (t_ptr == NULL) return NULL;
r_ptr = (void *)(((size_t)t_ptr + adj_size) & ~alignment);
*((size_t*)r_ptr - 1) = (size_t)r_ptr - (size_t)t_ptr;
return r_ptr;
#else
return numa_alloc_onnode(size, numa_node);
#endif
}
void numa_free_aligned(void *ptr, size_t size) {
#if 0
if (ptr == NULL) return;
void *t_ptr = (void*)((size_t*)ptr - *((size_t*)ptr - 1));
numa_free(t_ptr, size);
#else
numa_free(ptr, size);
#endif
}
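/* Note: the disabled (#if 0) variants above sketch the classic
over-allocate-and-align scheme: reserve size + alignment + header bytes,
round the pointer up to the requested boundary, and stash the offset to the
raw allocation in the word just below the returned address so that the free
path can recover it. The active paths simply forward to libnuma, whose
mmap-based allocations are page-granular and hence already page-aligned. */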
int setup_my_numa(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, int n_threads) {
int max_nodes = numa_max_node() + 1;
int max_cfg_nodes = numa_num_configured_nodes();
int max_cfg_cpus = numa_num_configured_cpus();
int max_task_cpus = numa_num_task_cpus();
my_numa_thr_cfg *numa_thr_cfg = (my_numa_thr_cfg *) malloc(sizeof(my_numa_thr_cfg) * max_cfg_nodes);
printf("FWD NUMA configuration:\n");
printf("There are %d numa nodes on the system\n", max_nodes);
printf("There are %d configured numa nodes on the system\n", max_cfg_nodes);
printf("There are %d configured CPUs on the system\n", max_cfg_cpus);
printf("There are %d CPUs asigned for the current task\n", max_task_cpus);
struct bitmask* bmask = numa_bitmask_alloc(max_cfg_cpus);
int thr_count = 0, i = 0;
for (i = 0; i < max_cfg_nodes; i++) {
numa_node_to_cpus(i, bmask);
numa_thr_cfg[i].scratch = (libxsmm_bfloat16**) calloc(num_layers, sizeof(libxsmm_bfloat16*)); /* zero-initialized so unallocated layers stay NULL */
numa_thr_cfg[i].layer_size = (size_t*)calloc(num_layers, sizeof(size_t));
numa_thr_cfg[i].blocksOFm_s = (int*)malloc(sizeof(int)*num_layers);
numa_thr_cfg[i].blocksOFm_e = (int*)malloc(sizeof(int)*num_layers);
/*
printf("@@@@@ node %d size %zd cpus ", i, bmask->size);
size_t j = 0;
for(j = 0; j < bmask->size; j++)
printf("%d", numa_bitmask_isbitset(bmask, j));
printf("\n");
*/
int num_threads_in_mask = 0;
int t = 0;
for (t = 0; t < bmask->size; t++)
if (numa_bitmask_isbitset(bmask, t)) num_threads_in_mask++;
int node_threads = 0;
while(thr_count < n_threads && node_threads < num_threads_in_mask) {
if (numa_bitmask_isbitset(bmask, thr_count)) {
numa_thr_cfg[i].thr_s = thr_count;
break;
}
thr_count++; node_threads++;
}
while(thr_count < n_threads && node_threads < num_threads_in_mask) {
if (numa_bitmask_isbitset(bmask, thr_count))
numa_thr_cfg[i].thr_e = thr_count;
thr_count++; node_threads++;
}
}
*numa_thr_cfg_ = numa_thr_cfg;
return 1;
}
int setup_my_numa_fwd(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
int max_cfg_nodes = numa_num_configured_nodes();
int i = 0;
for (i = 0; i < max_cfg_nodes; i++) {
int l = 0;
for (l = 0; l < num_layers; l++) {
if (my_fc_fwd[l].fwd_bf > 1) {
printf("@@@ NUMA ERROR: doesn't support this configuration\n");
return -1;
}
int thr = 0;
const libxsmm_blasint nBlocksOFm = my_fc_fwd[l].K / my_fc_fwd[l].bk;
const libxsmm_blasint nBlocksMB = my_fc_fwd[l].N / my_fc_fwd[l].bn;
if (my_fc_fwd[l].fwd_2d_blocking == 1) {
libxsmm_blasint row_teams = my_fc_fwd[l].fwd_row_teams;
libxsmm_blasint M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, row_teams);
numa_thr_cfg[i].blocksOFm_s[l] = nBlocksOFm;
numa_thr_cfg[i].blocksOFm_e[l] = 0;
for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e
&& numa_thr_cfg[i].thr_s != numa_thr_cfg[i].thr_e; thr++) {
libxsmm_blasint my_row_id = thr % row_teams; /* ltid */
libxsmm_blasint my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksOFm);
libxsmm_blasint my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksOFm);
numa_thr_cfg[i].blocksOFm_s[l] = (my_M_start <= numa_thr_cfg[i].blocksOFm_s[l])
? my_M_start
: numa_thr_cfg[i].blocksOFm_s[l];
numa_thr_cfg[i].blocksOFm_e[l] = (my_M_end >= numa_thr_cfg[i].blocksOFm_e[l])
? my_M_end
: numa_thr_cfg[i].blocksOFm_e[l];
}
} else {
numa_thr_cfg[i].blocksOFm_s[l] = nBlocksOFm;
numa_thr_cfg[i].blocksOFm_e[l] = 0;
for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e
&& numa_thr_cfg[i].thr_s != numa_thr_cfg[i].thr_e; thr++) {
const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
const libxsmm_blasint chunksize = (work % my_fc_fwd[l].threads == 0) ?
(work / my_fc_fwd[l].threads) : ((work / my_fc_fwd[l].threads) + 1);
const libxsmm_blasint thr_begin = (thr * chunksize < work) ? (thr * chunksize) : work;
const libxsmm_blasint thr_end = ((thr + 1) * chunksize < work) ? ((thr + 1) * chunksize) : work;
int ofm_s = thr_begin / nBlocksMB;
int ofm_e = thr_end / nBlocksMB;
numa_thr_cfg[i].blocksOFm_s[l] = (ofm_s <= numa_thr_cfg[i].blocksOFm_s[l])
? ofm_s
: numa_thr_cfg[i].blocksOFm_s[l];
numa_thr_cfg[i].blocksOFm_e[l] = (ofm_e >= numa_thr_cfg[i].blocksOFm_e[l])
? ofm_e
: numa_thr_cfg[i].blocksOFm_e[l];
}
}
}
}
return 1;
}
int allocate_numa_buffers_fwd(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
int max_cfg_nodes = numa_num_configured_nodes();
int i = 0, l = 0;
for (i = 0; i < max_cfg_nodes; i++) {
for (l = 0; l < num_layers; l++) {
const libxsmm_blasint nBlocksIFm = my_fc_fwd[l].C / my_fc_fwd[l].bc;
const libxsmm_blasint BOFM_shift = nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk;
int l_nBlocksOFm = (numa_thr_cfg[i].blocksOFm_e[l] - numa_thr_cfg[i].blocksOFm_s[l]) + 1;
if (l_nBlocksOFm <= 0)
continue;
numa_thr_cfg[i].layer_size[l] = sizeof(libxsmm_bfloat16) * ((l_nBlocksOFm) * BOFM_shift);
numa_thr_cfg[i].scratch[l] = (libxsmm_bfloat16*)numa_alloc_onnode_aligned(numa_thr_cfg[i].layer_size[l], i, 2097152);
if (numa_thr_cfg[i].scratch[l] == NULL) {
printf("@@@ NUMA ERROR: cannot allocate on node #%d\n", i);
return -1;
}
}
}
return 1;
}
int copy_to_numa_buffers_fwd_inf(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd, libxsmm_bfloat16 **fil_libxsmm) {
my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
int max_cfg_nodes = numa_num_configured_nodes();
int i, l;
#pragma omp parallel for collapse(2) private (i,l)
for (i = 0; i < max_cfg_nodes; i++) {
for (l = 0; l < num_layers; l++) {
const libxsmm_blasint nBlocksIFm = my_fc_fwd[l].C / my_fc_fwd[l].bc;
const libxsmm_blasint BOFM_shift = nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk;
int l_nBlocksOFm = (numa_thr_cfg[i].blocksOFm_e[l] - numa_thr_cfg[i].blocksOFm_s[l]) + 1;
int j = 0;
for (j = 0; j < l_nBlocksOFm ; j++) {
size_t l_BOFM_shift = j * BOFM_shift;
libxsmm_bfloat16 *out = numa_thr_cfg[i].scratch[l] + l_BOFM_shift;
libxsmm_bfloat16 *inp = fil_libxsmm[l] + numa_thr_cfg[i].blocksOFm_s[l] * BOFM_shift + l_BOFM_shift;
memcpy(out, inp, sizeof(libxsmm_bfloat16) * nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk);
}
}
}
return 1;
}
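/* Note: the three helpers above replicate each layer's weight blocks into
node-local memory: setup_my_numa_fwd records the nBlocksOFm range each
node's threads will touch, allocate_numa_buffers_fwd reserves that range on
the node, and copy_to_numa_buffers_fwd_inf copies the blocks in, so the
forward GEMMs in the benchmark loop read their weights from local memory. */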
int main(int argc, char* argv[])
{
libxsmm_bfloat16 **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm;
libxsmm_bfloat16 **bias_libxsmm, **delbias_libxsmm;
float **fil_master;
unsigned char **relumask_libxsmm;
int *label_libxsmm;
my_eltwise_fuse my_fuse;
my_fc_fwd_config* my_fc_fwd;
my_fc_bwd_config* my_fc_bwd;
my_opt_config* my_opt;
my_smax_fwd_config my_smax_fwd;
my_smax_bwd_config my_smax_bwd;
void* scratch = NULL;
size_t scratch_size = 0;
#ifdef CHECK_L1
float *last_act_fwd_f32 = NULL;
float *first_wt_bwdupd_f32 = NULL;
#endif
/* some parameters we can overwrite via cli,
default is some inner layer of overfeat */
int iters = 10; /* repetitions of benchmark */
int MB = 32; /* mini-batch size, "N" */
int fuse_type = 0; /* 0: none, 1: bias, 2: relu, 3: sigmoid, 4: bias+relu, 5: bias+sigmoid */
char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */
int bn = 64;
int bk = 64;
int bc = 64;
int *C; /* number of input feature maps, "C" */
int num_layers = 0;
const char *const env_check = getenv("CHECK");
const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check));
#if defined(_OPENMP)
int nThreads = omp_get_max_threads(); /* number of threads */
#else
int nThreads = 1; /* number of threads */
#endif
unsigned long long l_start, l_end;
double l_total = 0.0;
double gflop = 0.0;
int i, j;
double act_size = 0.0;
double fil_size = 0.0;
float lr = 0.2f;
float loss_weight = 0.1f;
libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff;
libxsmm_matdiff_clear(&norms_fwd);
libxsmm_matdiff_clear(&norms_bwd);
libxsmm_matdiff_clear(&norms_upd);
libxsmm_matdiff_clear(&diff);
if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
return 0;
}
libxsmm_rng_set_seed(1);
/* reading new values from cli */
i = 1;
num_layers = argc - 9;
if (argc > i) iters = atoi(argv[i++]);
if (argc > i) MB = atoi(argv[i++]);
if (argc > i) fuse_type = atoi(argv[i++]);
if (argc > i) type = *(argv[i++]);
if (argc > i) bn = atoi(argv[i++]);
if (argc > i) bk = atoi(argv[i++]);
if (argc > i) bc = atoi(argv[i++]);
/* allocate the number-of-channels buffer */
if ( num_layers < 1 ) {
printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
return 0;
}
C = (int*)malloc((num_layers+2)*sizeof(int));
for (j = 0 ; i < argc; ++i, ++j ) {
C[j] = atoi(argv[i]);
}
/* handle softmax config */
C[num_layers+1] = C[num_layers];
if (type != 'A' && type != 'F' && type != 'B') {
printf("type needs to be 'A' (All), 'F' (FP only), 'B' (BP only)\n");
return -1;
}
if ( (fuse_type < 0) || (fuse_type > 5) ) {
printf("fuse type needs to be 0 (None), 1 (Bias), 2 (ReLU), 3 (Sigmoid), 4 (Bias+ReLU), 5 (Bias+Sigmoid)\n");
return -1;
}
#if defined(__SSE3__)
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
#endif
/* print some summary */
printf("##########################################\n");
printf("# Setting Up (Common) #\n");
printf("##########################################\n");
printf("PARAMS: N:%d\n", MB);
printf("PARAMS: Layers: %d\n", num_layers);
printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf(" Threads:%d\n", nThreads); else printf("\n");
for (i = 0; i < num_layers; ++i ) {
if (i == 0) {
act_size += (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0);
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) );
}
act_size += (double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0);
fil_size += (double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0);
printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) );
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, MB, C[i+1], (double)(MB*C[i+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) );
}
act_size += (double)(MB*C[num_layers+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0);
printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*sizeof(libxsmm_bfloat16))/(1024.0*1024.0) );
printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE Filter (incl. master): %10.2f MiB\n", 3.0*fil_size );
printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size );
printf("TOTAL SIZE MLP: %10.2f MiB\n", (4.0*fil_size) + (2.0*act_size) );
/* allocate data */
act_libxsmm = (libxsmm_bfloat16**)malloc( (num_layers+2)*sizeof(libxsmm_bfloat16*) );
delact_libxsmm = (libxsmm_bfloat16**)malloc( (num_layers+1)*sizeof(libxsmm_bfloat16*) );
for ( i = 0 ; i < num_layers+2; ++i ) {
#ifdef ACT_NUMA_INTERLEAVED
act_libxsmm[i] = (libxsmm_bfloat16*)numa_alloc_interleaved( MB*C[i]*sizeof(libxsmm_bfloat16));
#else
act_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152);
#endif
/* softmax has no incoming gradients */
if ( i < num_layers+1 ) {
delact_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152);
}
}
fil_master = (float**) malloc( num_layers*sizeof(float*) );
fil_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
delfil_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
for ( i = 0 ; i < num_layers; ++i ) {
fil_master[i] = (float*) libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
fil_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
delfil_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
}
bias_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
delbias_libxsmm = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
for ( i = 0 ; i < num_layers; ++i ) {
bias_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
delbias_libxsmm[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
}
relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) );
for ( i = 0 ; i < num_layers; ++i ) {
relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152);
}
label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152);
/* init data */
for ( i = 0 ; i < num_layers+2; ++i ) {
my_init_buf_bf16( act_libxsmm[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers+1; ++i ) {
my_init_buf_bf16( delact_libxsmm[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
#if 0
{
float *cur_fil = (float*) malloc(C[i]*C[i+1]*sizeof(float));
my_init_buf( cur_fil, C[i]*C[i+1], 0, 0 );
my_matrix_copy_KCCK_to_KCCK_vnni(cur_fil, fil_master[i], C[i], C[i+1], bc, bk);
libxsmm_rne_convert_fp32_bf16( fil_master[i], fil_libxsmm[i], C[i]*C[i+1] );
free(cur_fil);
}
#else
my_init_buf( fil_master[i], C[i]*C[i+1], 0, 0 );
libxsmm_rne_convert_fp32_bf16( fil_master[i], fil_libxsmm[i], C[i]*C[i+1] );
#endif
}
for ( i = 0 ; i < num_layers; ++i ) {
#if 0
float *cur_fil = (float*) malloc(C[i]*C[i+1]*sizeof(float));
float *cur_fil_vnni = (float*) malloc(C[i]*C[i+1]*sizeof(float));
my_init_buf( cur_fil, C[i]*C[i+1], 0, 0 );
my_matrix_copy_KCCK_to_KCCK_vnni(cur_fil, cur_fil_vnni, C[i], C[i+1], bc, bk);
libxsmm_rne_convert_fp32_bf16( cur_fil_vnni, delfil_libxsmm[i], C[i]*C[i+1] );
free(cur_fil);
free(cur_fil_vnni);
#else
my_init_buf_bf16( delfil_libxsmm[i], C[i]*C[i+1], 0, 0 );
#endif
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_bf16( bias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_bf16( delbias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
zero_buf_uint8( relumask_libxsmm[i], MB*C[i+1] );
}
zero_buf_int32( label_libxsmm, MB );
printf("\n");
printf("##########################################\n");
printf("# Setting Up (custom-Storage) #\n");
printf("##########################################\n");
if ( fuse_type == 0 ) {
my_fuse = MY_ELTWISE_FUSE_NONE;
} else if ( fuse_type == 1 ) {
my_fuse = MY_ELTWISE_FUSE_BIAS;
} else if ( fuse_type == 2 ) {
my_fuse = MY_ELTWISE_FUSE_RELU;
} else if ( fuse_type == 4 ) {
my_fuse = MY_ELTWISE_FUSE_BIAS_RELU;
} else {
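/* fuse_type 3 (sigmoid) and 5 (bias+sigmoid) pass the CLI check above but
are not wired up here, so they fall back to no fusion */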
my_fuse = MY_ELTWISE_FUSE_NONE;
}
/* allocating handles */
my_fc_fwd = (my_fc_fwd_config*) malloc( num_layers*sizeof(my_fc_fwd_config) );
my_fc_bwd = (my_fc_bwd_config*) malloc( num_layers*sizeof(my_fc_bwd_config) );
my_opt = (my_opt_config*) malloc( num_layers*sizeof(my_opt_config) );
/* setting up handles + scratch */
for ( i = 0; i < num_layers; ++i ) {
my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
(C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, my_fuse);
my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
(C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, my_fuse);
my_opt[i] = setup_my_opt( C[i], C[i+1], (C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, lr );
/* let's allocate and bind scratch */
if ( my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0 ) {
size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size );
if ( alloc_size > scratch_size ) {
if ( scratch != NULL ) libxsmm_free( scratch );
scratch_size = alloc_size;
scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
}
}
}
/* softmax+loss is treated as the (N+1)-th layer */
my_smax_fwd = setup_my_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
(C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
nThreads );
my_smax_bwd = setup_my_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
(C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
nThreads, loss_weight );
if ( my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0 ) {
size_t alloc_size = LIBXSMM_MAX( my_smax_fwd.scratch_size, my_smax_bwd.scratch_size );
if ( alloc_size > scratch_size ) {
if ( scratch != NULL ) libxsmm_free( scratch );
scratch_size = alloc_size;
scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
}
}
my_numa_thr_cfg *numa_thr_cfg;
setup_my_numa(&numa_thr_cfg, num_layers, nThreads);
if ( type == 'F') {
printf("##########################################\n");
printf("# Performance - FWD (custom-Storage) #\n");
printf("##########################################\n");
setup_my_numa_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
allocate_numa_buffers_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
l_start = libxsmm_timer_tick();
copy_to_numa_buffers_fwd_inf(&numa_thr_cfg, num_layers, my_fc_fwd, fil_libxsmm);
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
const int numa_node = numa_node_of_cpu(tid);
for (j = 0; j < iters; ++j) {
for ( i = 0; i < num_layers; ++i) {
libxsmm_bfloat16 *filt = numa_thr_cfg[numa_node].scratch[i];
my_fc_fwd_exec( my_fc_fwd[i], filt, act_libxsmm[i], act_libxsmm[i+1],
bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, &numa_thr_cfg[numa_node], i);
}
#ifdef USE_SOFTMAX
my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss,
0, tid, scratch );
#endif
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
gflop = 0.0;
for ( i = 0; i < num_layers; ++i) {
gflop += (2.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,FP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
/* Print some norms on last act for fwd after all iterations */
#ifdef CHECK_L1
last_act_fwd_f32 = (float*) malloc(MB*C[num_layers]*sizeof(float));
libxsmm_convert_bf16_f32( act_libxsmm[num_layers], last_act_fwd_f32, MB*C[num_layers]);
libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, last_act_fwd_f32, last_act_fwd_f32, 0, 0);
printf("L1 of act[num_layers] : %.25g\n", norms_fwd.l1_ref);
free(last_act_fwd_f32);
#endif
}
if (type == 'B') {
printf("##########################################\n");
printf("# Performance - BWD (custom-Storage) #\n");
printf("##########################################\n");
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
for (j = 0; j < iters; ++j) {
#ifdef USE_SOFTMAX
my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm,
0, tid, scratch );
#endif
for ( i = num_layers-1; i > 0; --i) {
my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i],
act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch );
my_opt_exec( my_opt[i], fil_libxsmm[i], fil_master[i], delfil_libxsmm[i], 0, tid, scratch );
}
my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0],
act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch );
my_opt_exec( my_opt[0], fil_libxsmm[0], fil_master[0], delfil_libxsmm[0], 0, tid, scratch );
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
gflop = 0.0;
for ( i = num_layers-1; i > 0; --i) {
gflop += (4.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
gflop += (2.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
}
if (type == 'A') {
printf("#########################################################\n");
printf("# Unimplemented: Performance - FWD-BWD (custom-Storage) #\n");
printf("#########################################################\n");
exit(-1);
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
for (j = 0; j < iters; ++j) {
for ( i = 0; i < num_layers; ++i) {
my_fc_fwd_exec( my_fc_fwd[i], fil_libxsmm[i], act_libxsmm[i], act_libxsmm[i+1],
bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, NULL, 0);
}
#ifdef USE_SOFTMAX
my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss,
0, tid, scratch );
my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm,
0, tid, scratch );
#endif
for ( i = num_layers-1; i > 0; --i) {
my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i],
act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch );
my_opt_exec( my_opt[i], fil_libxsmm[i], fil_master[i], delfil_libxsmm[i], 0, tid, scratch );
}
my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0],
act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch );
my_opt_exec( my_opt[0], fil_libxsmm[0], fil_master[0], delfil_libxsmm[0], 0, tid, scratch );
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
#ifdef CHECK_L1
/* Print some norms on last act for fwd and weights of first layer after all iterations */
last_act_fwd_f32 = (float*) malloc(MB*C[num_layers]*sizeof(float));
first_wt_bwdupd_f32 = (float*) malloc(C[0]*C[1]*sizeof(float));
libxsmm_convert_bf16_f32( act_libxsmm[num_layers], last_act_fwd_f32, MB*C[num_layers]);
#if 1
libxsmm_convert_bf16_f32( fil_libxsmm[0], first_wt_bwdupd_f32, C[0]*C[1]);
libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, last_act_fwd_f32, last_act_fwd_f32, 0, 0);
printf("L1 of act[num_layers] : %.25g\n", norms_fwd.l1_ref);
libxsmm_matdiff_reduce(&diff, &norms_fwd);
libxsmm_matdiff(&norms_bwd, LIBXSMM_DATATYPE_F32, C[0]*C[1], 1, first_wt_bwdupd_f32, first_wt_bwdupd_f32, 0, 0);
printf("L1 of wt[0] : %.25g\n", norms_bwd.l1_ref);
libxsmm_matdiff_reduce(&diff, &norms_bwd);
#else
{
int e = 0;
FILE *fileAct, *fileWt;
float *ref_last_act_fwd_f32 = (float*) malloc(MB*C[num_layers]*sizeof(float));
float *ref_first_wt_bwdupd_f32 = (float*) malloc(C[0]*C[1]*sizeof(float));
float *ref_first_wt_bwdupd_f32_kc = (float*) malloc(C[0]*C[1]*sizeof(float));
libxsmm_bfloat16 *first_wt_bwdupd_bf16 = (libxsmm_bfloat16*) malloc(C[0]*C[1]*sizeof(libxsmm_bfloat16));
fileAct = fopen("acts.txt","r");
if (fileAct != NULL) {
int bufferLength = 255;
char buffer[bufferLength];
e = 0;
while(fgets(buffer, bufferLength, fileAct)) {
ref_last_act_fwd_f32[e] = atof(buffer);
e++;
}
fclose(fileAct);
}
/* compare */
libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, ref_last_act_fwd_f32, last_act_fwd_f32, 0, 0);
printf("##########################################\n");
printf("# Correctness - Last fwd act #\n");
printf("##########################################\n");
printf("L1 reference : %.25g\n", norms_fwd.l1_ref);
printf("L1 test : %.25g\n", norms_fwd.l1_tst);
printf("L2 abs.error : %.24f\n", norms_fwd.l2_abs);
printf("L2 rel.error : %.24f\n", norms_fwd.l2_rel);
printf("Linf abs.error: %.24f\n", norms_fwd.linf_abs);
printf("Linf rel.error: %.24f\n", norms_fwd.linf_rel);
printf("Check-norm : %.24f\n", norms_fwd.normf_rel);
libxsmm_matdiff_reduce(&diff, &norms_fwd);
fileWt = fopen("weights.txt","r");
if (fileWt != NULL) {
int bufferLength = 255;
char buffer[bufferLength];
e = 0;
while(fgets(buffer, bufferLength, fileWt)) {
ref_first_wt_bwdupd_f32[e] = atof(buffer);
e++;
}
fclose(fileWt);
}
matrix_copy_KCCK_to_KC( ref_first_wt_bwdupd_f32, ref_first_wt_bwdupd_f32_kc, C[0], C[1], bc, bk );
matrix_copy_KCCK_to_KC_bf16( fil_libxsmm[0], first_wt_bwdupd_bf16, C[0], C[1], bc, bk );
libxsmm_convert_bf16_f32( first_wt_bwdupd_bf16, first_wt_bwdupd_f32, C[0]*C[1] );
/* compare */
libxsmm_matdiff(&norms_bwd, LIBXSMM_DATATYPE_F32, C[0]*C[1], 1, ref_first_wt_bwdupd_f32_kc, first_wt_bwdupd_f32, 0, 0);
printf("##########################################\n");
printf("# Correctness - First bwdupd wt #\n");
printf("##########################################\n");
printf("L1 reference : %.25g\n", norms_bwd.l1_ref);
printf("L1 test : %.25g\n", norms_bwd.l1_tst);
printf("L2 abs.error : %.24f\n", norms_bwd.l2_abs);
printf("L2 rel.error : %.24f\n", norms_bwd.l2_rel);
printf("Linf abs.error: %.24f\n", norms_bwd.linf_abs);
printf("Linf rel.error: %.24f\n", norms_bwd.linf_rel);
printf("Check-norm : %.24f\n", norms_bwd.normf_rel);
libxsmm_matdiff_reduce(&diff, &norms_bwd);
free(ref_last_act_fwd_f32);
free(ref_first_wt_bwdupd_f32);
free(ref_first_wt_bwdupd_f32_kc);
free(first_wt_bwdupd_bf16);
}
#endif
free(first_wt_bwdupd_f32);
free(last_act_fwd_f32);
#endif
gflop = 0.0;
for ( i = num_layers-1; i > 0; --i) {
gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
}
/* deallocate data */
if ( scratch != NULL ) {
libxsmm_free(scratch);
}
for ( i = 0; i < num_layers; ++i ) {
if ( i == 0 ) {
#ifdef ACT_NUMA_INTERLEAVED
numa_free(act_libxsmm[i], MB*C[i]*sizeof(libxsmm_bfloat16));
#else
libxsmm_free(act_libxsmm[i]);
#endif
libxsmm_free(delact_libxsmm[i]);
}
#ifdef ACT_NUMA_INTERLEAVED
numa_free(act_libxsmm[i+1], MB*C[i+1]*sizeof(libxsmm_bfloat16));
#else
libxsmm_free(act_libxsmm[i+1]);
#endif
libxsmm_free(delact_libxsmm[i+1]);
libxsmm_free(fil_libxsmm[i]);
libxsmm_free(delfil_libxsmm[i]);
libxsmm_free(bias_libxsmm[i]);
libxsmm_free(delbias_libxsmm[i]);
libxsmm_free(relumask_libxsmm[i]);
libxsmm_free(fil_master[i]);
}
#ifdef ACT_NUMA_INTERLEAVED
numa_free(act_libxsmm[num_layers+1], MB*C[num_layers+1]*sizeof(libxsmm_bfloat16));
#else
libxsmm_free(act_libxsmm[num_layers+1]);
#endif
libxsmm_free(label_libxsmm);
for (i = 0; i < numa_num_configured_nodes(); i++) {
free(numa_thr_cfg[i].blocksOFm_s);
free(numa_thr_cfg[i].blocksOFm_e);
for (j = 0; j < num_layers; j++)
numa_free_aligned(numa_thr_cfg[i].scratch[j], numa_thr_cfg[i].layer_size[j]);
free(numa_thr_cfg[i].scratch);
free(numa_thr_cfg[i].layer_size);
}
free(numa_thr_cfg);
free( my_opt );
free( my_fc_fwd );
free( my_fc_bwd );
free( act_libxsmm );
free( delact_libxsmm );
free( fil_master );
free( fil_libxsmm );
free( delfil_libxsmm );
free( bias_libxsmm );
free( delbias_libxsmm );
free( relumask_libxsmm );
free( C );
/* some empty lines at the end */
printf("\n\n\n");
return 0;
}
|
dijkstra_9_OMP.c | /*
Code owned by Geeks for Geeks
Source: https://www.geeksforgeeks.org/dijkstras-shortest-path-algorithm-greedy-algo-7/
*/
#include <omp.h>
#include <limits.h>
#include <stdio.h>
#include <stdbool.h>
#include "timer.h"
// Number of vertices in the graph
#define V 9
// A utility function to find the vertex with minimum distance value, from
// the set of vertices not yet included in shortest path tree
int minDistance(int dist[], bool sptSet[])
{
// Initialize min value; each thread scans its chunk into private locals and
// the results are merged in a critical section (the original "parallel for"
// updated a shared min/min_index, which is a data race)
int min = INT_MAX, min_index = -1;
#pragma omp parallel
{
int local_min = INT_MAX, local_index = -1;
#pragma omp for nowait
for (int v = 0; v < V; v++)
if (sptSet[v] == false && dist[v] <= local_min)
local_min = dist[v], local_index = v;
#pragma omp critical
{
if (local_index != -1 && (min_index == -1 || local_min < min))
min = local_min, min_index = local_index;
}
}
return min_index;
}
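// Note: OpenMP's reduction(min:...) clause reduces a value but cannot carry
// the index of the minimum along with it, hence the manual reduction above:
// per-thread locals plus a single critical merge.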
// A utility function to print the constructed distance array
void printSolution(int dist[])
{
printf("Vertex \t\t Distance from Source\n");
// keep the printing sequential so the rows appear in vertex order
for (int i = 0; i < V; i++)
printf("%d \t\t %d\n", i, dist[i]);
}
// Function that implements Dijkstra's single source shortest path algorithm
// for a graph represented using adjacency matrix representation
void dijkstra(int graph[V][V], int src)
{
int dist[V]; // The output array. dist[i] will hold the shortest
// distance from src to i
bool sptSet[V]; // sptSet[i] will be true if vertex i is included in shortest
// path tree or shortest distance from src to i is finalized
// Initialize all distances as INFINITE and sptSet[] as false
#pragma omp parallel for
for (int i = 0; i < V; i++)
dist[i] = INT_MAX, sptSet[i] = false;
// Distance of source vertex from itself is always 0
dist[src] = 0;
// Find shortest path for all vertices. This outer loop is inherently
// sequential (each iteration depends on the previous sptSet/dist updates),
// so the parallelism is applied to the independent relaxation loop below.
for (int count = 0; count < V - 1; count++) {
// Pick the minimum distance vertex from the set of vertices not
// yet processed. u is always equal to src in the first iteration.
int u = minDistance(dist, sptSet);
// Mark the picked vertex as processed
sptSet[u] = true;
// Update dist value of the adjacent vertices of the picked vertex;
// each v writes only dist[v], so this loop parallelizes safely.
#pragma omp parallel for
for (int v = 0; v < V; v++)
// Update dist[v] only if it is not in sptSet, there is an edge from
// u to v, and total weight of path from src to v through u is
// smaller than current value of dist[v]
if (!sptSet[v] && graph[u][v] && dist[u] != INT_MAX
&& dist[u] + graph[u][v] < dist[v])
dist[v] = dist[u] + graph[u][v];
}
// print the constructed distance array
printSolution(dist);
}
// driver program to test above function
int main()
{
/* Let us create the example graph discussed above */
int graph[V][V] = { { 0, 4, 0, 0, 0, 0, 0, 8, 0 },
{ 4, 0, 8, 0, 0, 0, 0, 11, 0 },
{ 0, 8, 0, 7, 0, 4, 0, 0, 2 },
{ 0, 0, 7, 0, 9, 14, 0, 0, 0 },
{ 0, 0, 0, 9, 0, 10, 0, 0, 0 },
{ 0, 0, 4, 14, 10, 0, 2, 0, 0 },
{ 0, 0, 0, 0, 0, 2, 0, 1, 6 },
{ 8, 11, 0, 0, 0, 0, 1, 0, 7 },
{ 0, 0, 2, 0, 0, 0, 6, 7, 0 } };
StartTimer();
dijkstra(graph, 0);
double runtime = GetTimer();
printf(" total: %f s\n", runtime / 1000);
return 0;
}
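/* Example build/run (assumed gcc toolchain; timer.h is the project's own helper):
gcc -O2 -fopenmp dijkstra_9_OMP.c -o dijkstra_9_OMP && ./dijkstra_9_OMP
For the graph above this prints the shortest distances from vertex 0,
e.g. 14 for vertex 8. */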
|
MoleculeService.h | // Copyright (C) 2015 Technische Universitaet Muenchen
// This file is part of the Mamico project. For conditions of distribution
// and use, please see the copyright notice in Mamico's main folder, or at
// www5.in.tum.de/mamico
#ifndef _MOLECULARDYNAMICS_SERVICES_MOLECULESERVICE_H_
#define _MOLECULARDYNAMICS_SERVICES_MOLECULESERVICE_H_
#include "simplemd/Molecule.h"
#include "simplemd/MolecularDynamicsDefinitions.h"
#include "tarch/utils/RandomNumberService.h"
#include "simplemd/services/MolecularPropertiesService.h"
#include "simplemd/molecule-mappings/ComputeMeanVelocityMapping.h"
#include "simplemd/molecule-mappings/SetMeanVelocityMapping.h"
#include <list>
#include <cmath>
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <vector>
#include <fstream>
#include <sstream>
#if (MD_OPENMP == MD_YES)
#include <omp.h>
#endif
namespace simplemd {
namespace services {
class MoleculeService;
// forward declarations to remove circular dependencies
class ParallelTopologyService;
class LinkedCellService;
}
}
/** data service storing and managing all molecules which are lying on one process.
*
* @author Philipp Neumann
*/
class simplemd::services::MoleculeService {
public:
~MoleculeService();
/** initialises the molecules. To this end, the molecules are placed on a regular Cartesian grid with moleculesPerDirection
* molecules in each spatial direction within the domain described by domainSize and domainOffset.
* meanVelocity describes a mean flow velocity; temperature controls the thermal fluctuations.
* In addition, the allocation block size (parameter blockSize) can be set: in case molecules are added to the system, we need to
* allocate more memory; whenever that happens, a block of blockSize molecules is allocated at once.
*/
MoleculeService(
const tarch::la::Vector<MD_DIM,double> &domainSize,
const tarch::la::Vector<MD_DIM,double> &domainOffset,
const tarch::la::Vector<MD_DIM,unsigned int> &moleculesPerDirection,
const tarch::la::Vector<MD_DIM,double> &meanVelocity,
const double &kB,
const double &temperature,
const unsigned int& blockSize,
const simplemd::services::MolecularPropertiesService& molecularPropertiesService
);
/** initialises the MD simulation from a checkpoint-file. For a parallel simulation, this method parses
* checkpoint files for each rank, respectively. If multiple MD simulations are executed, make sure that the rank of the current
* MD simulation matches the respective rank of the checkpoint file.
*/
MoleculeService(
const tarch::la::Vector<MD_DIM,double> &domainSize,
const tarch::la::Vector<MD_DIM,double> &domainOffset,
const std::string &checkPointFileStem,
const unsigned int &blockSize,
const simplemd::services::ParallelTopologyService& parallelTopologyService
);
/** initialises a potentially parallel MD simulation from a sequential checkpoint file. */
MoleculeService(
const tarch::la::Vector<MD_DIM,double> &domainSize,
const tarch::la::Vector<MD_DIM,double> &domainOffset,
const std::string &checkPointFileStem,
const unsigned int &blockSize
);
/** adds a molecule to the system. The molecule data are copied from the const reference to a free position within the memory field
* or - in case no memory is available - new memory is allocated and the molecule is put in there. Besides, the list _freeMoleculePositions
* is adapted accordingly (and _numberMolecules is incremented).
* The function returns a pointer to the new molecule. If something goes wrong, NULL is returned.
*/
Molecule* addMolecule(const Molecule& molecule);
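// Usage sketch (the Molecule constructor arguments are hypothetical):
//   simplemd::Molecule m(position, velocity);
//   simplemd::Molecule* inserted = service.addMolecule(m);
//   if (inserted == NULL) { /* allocation failed */ }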
/** returns the number of molecules */
const unsigned int& getNumberMolecules() const;
/** deletes a molecule from the system by adding its position to the _freeMoleculePositions-list. */
void deleteMolecule(Molecule& molecule);
/** shuts down the service */
void shutdown();
/** applies a molecule-mapping to every molecule on this process
 * (e.g. for time integration); a usage sketch follows the declaration below
 */
template<class A>
void iterateMolecules(A& a,const bool &useOpenMP);
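// Usage sketch (namespace and constructor arguments of the mapping are assumed;
// see the includes above for the available mappings):
//   simplemd::moleculemappings::ComputeMeanVelocityMapping mapping(/*...*/);
//   service.iterateMolecules(mapping, true); // true: parallelise with OpenMP if enabled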
/** creates the initial velocity for a molecule from meanVelocity and the given temperature, and stores the result in initialVelocity */
void getInitialVelocity(
const tarch::la::Vector<MD_DIM,double>& meanVelocity, const double &kB, const double& temperature,
const simplemd::services::MolecularPropertiesService &molecularPropertiesService,
tarch::la::Vector<MD_DIM,double>& initialVelocity
) const;
/** reorganises the storage of the molecules. This can speed up simulations that perform a large number of
 * molecule deletions and insertions.
 * In addition, the molecules are rearranged such that molecules belonging to the same linked cell are
 * located close together in memory: the molecules are sorted lexicographically w.r.t. their linked cell index
 * and stored in this sequence.
 */
void reorganiseMemory(const simplemd::services::ParallelTopologyService& parallelTopologyService, simplemd::services::LinkedCellService& linkedCellService);
/** writes a checkpoint containing:
* - the number of molecules and the dimension of the problem (1,2 or 3) in one line
* - each molecule in one line consisting of position, velocity and force_old.
* In parallel cases, each process writes its own checkpoint data. The file will be named
* filestem_t_rank.dat in any case (rank=0 in the serial case).
* The mapping WriteCheckPointMapping is used.
*/
void writeCheckPoint(const simplemd::services::ParallelTopologyService& parallelTopologyService, const std::string &filestem,const unsigned int& t);
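// Sketch of the resulting file layout for two molecules in 3D (numbers are
// hypothetical; the exact per-line ordering is defined by WriteCheckPointMapping):
//   2 3
//   x y z  vx vy vz  fx fy fz
//   x y z  vx vy vz  fx fy fz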
/** resets the velocity over the whole molecule system to the mean velocity specified at the beginning */
void resetMeanVelocity();
private:
/** pointer to all the molecules */
std::vector<simplemd::Molecule* > _molecules;
/** stores the mean velocity for normalisation */
tarch::la::Vector<MD_DIM,double> _meanVelocity;
/** number of molecules stored in memory */
unsigned int _numberMolecules;
/** positions within the _molecules array where a molecule can be inserted */
std::list<unsigned int> _freeMoleculePositions;
/** number of molecules that are stored in one memory block */
unsigned int _blockSize;
};
template<class A>
void simplemd::services::MoleculeService::iterateMolecules(A& a,const bool &useOpenMP){
const unsigned int blockSize = _blockSize;
const unsigned int freeMoleculePositions = (unsigned int)_freeMoleculePositions.size();
const unsigned int numberMolecules = _numberMolecules;
const unsigned int freeMoleculePositionsAndNumberMolecules = numberMolecules + freeMoleculePositions;
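// note on the addressing below: molecules are stored in heap blocks of blockSize
// entries, so the molecule with global index i lives at _molecules[i/blockSize][i%blockSize]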
// start iteration();
a.beginMoleculeIteration();
// open MP
#if (MD_OPENMP==MD_YES)
if (useOpenMP){
// sort empty positions list
if (!_freeMoleculePositions.empty()){
_freeMoleculePositions.sort();
std::list<unsigned int>::iterator myIt = _freeMoleculePositions.begin();
unsigned int start = 0;
// loop over all intervals, starting at a certain point and ranging up to a deleted position
for (unsigned int i = 0; i < freeMoleculePositions; i++){
const unsigned int end = (*myIt);
#pragma omp parallel for
for (unsigned int j = start; j < end; j++){
#if (MD_DEBUG == MD_YES)
std::cout << "Handle molecule " << j << std::endl;
#endif
a.handleMolecule(_molecules[j/blockSize][j%blockSize]);
}
// go to the next possible start position (one position after *myIt) and increment myIt
// -> remark: if two free positions are consecutive, the next interval is empty and the
// inner loop above simply does nothing
start = end+1;
myIt++;
}
// do final loop (from last deleted molecule to last existing molecule)
#pragma omp parallel for
for (unsigned int i = start; i < freeMoleculePositionsAndNumberMolecules; i++){
#if (MD_DEBUG == MD_YES)
std::cout << "Handle molecule " << i << std::endl;
#endif
a.handleMolecule(_molecules[i/blockSize][i%blockSize]);
}
} else {
#pragma omp parallel for
for (unsigned int i = 0; i < numberMolecules; i++){
#if (MD_DEBUG == MD_YES)
std::cout << "Handle molecule " << i << std::endl;
#endif
a.handleMolecule(_molecules[i/blockSize][i%blockSize]);
}
}
// no Open MP
} else {
#endif
// sort empty positions list
if (!_freeMoleculePositions.empty()){
_freeMoleculePositions.sort();
std::list<unsigned int>::iterator myIt = _freeMoleculePositions.begin();
unsigned int start = 0;
// loop over all intervals, starting at a certain point and ranging up to a deleted position
for (unsigned int i = 0; i < freeMoleculePositions; i++){
const unsigned int end = *myIt;
for (unsigned int j = start; j < end; j++){
#if (MD_DEBUG == MD_YES)
std::cout << "Handle molecule " << j << std::endl;
#endif
a.handleMolecule(_molecules[j/blockSize][j%blockSize]);
}
// go to the next possible start position (one position after *myIt) and increment myIt
// -> remark: if two free positions are consecutive, the next interval is empty and the
// inner loop above simply does nothing
start = (*myIt)+1;
myIt++;
}
// do final loop (from last deleted molecule to last existing molecule)
for (unsigned int i = start; i < freeMoleculePositionsAndNumberMolecules; i++){
#if (MD_DEBUG == MD_YES)
std::cout << "Handle molecule " << i << std::endl;
#endif
a.handleMolecule(_molecules[i/blockSize][i%blockSize]);
}
} else {
for (unsigned int i = 0; i < numberMolecules; i++){
#if (MD_DEBUG == MD_YES)
std::cout << "Handle molecule " << i << std::endl;
#endif
a.handleMolecule(_molecules[i/blockSize][i%blockSize]);
}
}
#if (MD_OPENMP==MD_YES)
}
#endif
// end iteration();
a.endMoleculeIteration();
}
#endif // _MOLECULARDYNAMICS_SERVICES_MOLECULESERVICE_H_
|
GB_binop__iseq_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint16)
// A*D function (colscale): GB (_AxD__iseq_uint16)
// D*A function (rowscale): GB (_DxB__iseq_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint16)
// C=scalar+B GB (_bind1st__iseq_uint16)
// C=scalar+B' GB (_bind1st_tran__iseq_uint16)
// C=A+scalar GB (_bind2nd__iseq_uint16)
// C=A'+scalar GB (_bind2nd_tran__iseq_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
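// A user-level sketch (not part of this generated kernel): these workers are
// reached through the generic GraphBLAS API, e.g.
//   GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GxB_ISEQ_UINT16, A, B, NULL) ;
// which, for uint16 operands, can dispatch to the GB (_AemultB_*__iseq_uint16)
// functions defined below.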
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_UINT16 || GxB_NO_ISEQ_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__iseq_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__iseq_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__iseq_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__iseq_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__iseq_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__iseq_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__iseq_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__iseq_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__iseq_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__iseq_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__iseq_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__iseq_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__iseq_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__iseq_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__atan_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__atan_fp32_fp32)
// op(A') function: GB (_unop_tran__atan_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = atanf (aij)
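// A user-level sketch (not part of this generated kernel): this worker backs
// calls such as
//   GrB_Matrix_apply (C, NULL, NULL, GxB_ATAN_FP32, A, NULL) ;
// for FP32 matrices, assuming the generic apply path selects this kernel.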
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = atanf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = atanf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATAN || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__atan_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = atanf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = atanf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__atan_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
energyOrig.h | #pragma once
#include "core.h"
#include "geometry.h"
#include "space.h"
#include "potentials.h"
#include "multipole.h"
#include "penalty.h"
#include "mpi.h"
#include <Eigen/Dense>
#include <set>
#ifdef FAU_POWERSASA
#include <power_sasa.h>
#endif
namespace Faunus {
namespace Energy {
class Energybase {
public:
enum keys {OLD, NEW, NONE};
keys key=NONE;
std::string name;
std::string cite;
virtual double energy(Change&)=0; //!< energy due to change
inline virtual void to_json(json &j) const {}; //!< json output
inline virtual void sync(Energybase*, Change&) {}
};
void to_json(json &j, const Energybase &base) {
assert(!base.name.empty());
if (!base.cite.empty())
j[base.name]["reference"] = base.cite;
base.to_json( j[base.name] );
} //!< Converts any energy class to json object
/**
* This holds Ewald setup and must *not* depend on particle type, nor depend on Space
*/
struct EwaldData {
typedef std::complex<double> Tcomplex;
Eigen::Matrix3Xd kVectors; // k-vectors, 3xK
Eigen::VectorXd Aks; // 1xK, to minimize computational effort (Eq.24,DOI:10.1063/1.481216)
Eigen::VectorXcd Qion, Qdip; // 1xK
double alpha, rc, kc, check_k2_zero, lB;
double const_inf, eps_surf;
bool spherical_sum=true;
bool ipbc=false;
int kVectorsInUse=0;
Point L; //!< Box dimensions
void update(const Point &box) {
L = box;
int kcc = std::ceil(kc);
check_k2_zero = 0.1*std::pow(2*pc::pi/L.maxCoeff(), 2);
int kVectorsLength = (2*kcc+1) * (2*kcc+1) * (2*kcc+1) - 1;
if (kVectorsLength == 0) {
kVectors.resize(3,1);
Aks.resize(1);
kVectors.col(0) = Point(1,0,0); // Just so it is not the zero-vector
Aks[0] = 0;
kVectorsInUse = 1;
Qion.resize(1);
Qdip.resize(1);
} else {
double kc2 = kc*kc;
kVectors.resize(3, kVectorsLength);
Aks.resize(kVectorsLength);
kVectorsInUse = 0;
kVectors.setZero();
Aks.setZero();
int startValue = 1 - int(ipbc);
for (int kx = 0; kx <= kcc; kx++) {
double dkx2 = double(kx*kx);
for (int ky = -kcc*startValue; ky <= kcc; ky++) {
double dky2 = double(ky*ky);
for (int kz = -kcc*startValue; kz <= kcc; kz++) {
double factor = 1.0;
if(kx > 0)
factor *= 2;
if(ky > 0 && ipbc)
factor *= 2;
if(kz > 0 && ipbc)
factor *= 2;
double dkz2 = double(kz*kz);
Point kv = 2*pc::pi*Point(kx/L.x(),ky/L.y(),kz/L.z());
double k2 = kv.dot(kv);
if (k2 < check_k2_zero) // Check if k2 != 0
continue;
if (spherical_sum)
if( (dkx2/kc2) + (dky2/kc2) + (dkz2/kc2) > 1)
continue;
kVectors.col(kVectorsInUse) = kv;
Aks[kVectorsInUse] = factor*std::exp(-k2/(4*alpha*alpha))/k2;
kVectorsInUse++;
}
}
}
Qion.resize(kVectorsInUse);
Qdip.resize(kVectorsInUse);
Aks.conservativeResize(kVectorsInUse);
kVectors.conservativeResize(3,kVectorsInUse);
}
}
};
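// For reference, the weights assembled above are the standard Ewald reciprocal-space
// coefficients (cf. Eq. 24 of DOI:10.1063/1.481216 cited above):
//   A_k = factor * exp(-|k|^2/(4*alpha^2)) / |k|^2 with k = 2*pi*(nx/Lx, ny/Ly, nz/Lz),
// entering the energy as E_recip = (2*pi*lB/V) * sum_k A_k*|Q_k|^2, see
// PolicyIonIon::reciprocalEnergy() below.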
void from_json(const json &j, EwaldData &d) {
d.alpha = j.at("alpha");
d.rc = j.at("cutoff");
d.kc = j.at("kcutoff");
d.ipbc = j.value("ipbc", false);
d.spherical_sum = j.value("spherical_sum", true);
d.lB = pc::lB( j.at("epsr") );
d.eps_surf = j.value("epss", 0.0);
d.const_inf = (d.eps_surf < 1) ? 0 : 1; // if unphysical (<1), treat the surrounding as a conductor (eps=infinity) so the surface term vanishes
}
void to_json(json &j, const EwaldData &d) {
j = {{"lB", d.lB}, {"ipbc", d.ipbc}, {"epss", d.eps_surf},
{"alpha", d.alpha}, {"cutoff", d.rc}, {"kcutoff", d.kc},
{"wavefunctions", d.kVectors.cols()}, {"spherical_sum", d.spherical_sum}};
}
#ifdef DOCTEST_LIBRARY_INCLUDED
TEST_CASE("[Faunus] Ewald - EwaldData")
{
using doctest::Approx;
EwaldData data = R"({
"ipbc": false, "epsr": 1.0, "alpha": 0.894427190999916, "epss": 1.0,
"kcutoff": 11.0, "spherical_sum": true, "cutoff": 5.0})"_json;
data.update( Point(10,10,10) );
CHECK(data.ipbc == false);
CHECK(data.const_inf == 1);
CHECK(data.alpha == 0.894427190999916);
CHECK(data.kVectors.cols() == 2975);
CHECK(data.Qion.size() == data.kVectors.cols());
data.ipbc=true;
data.update( Point(10,10,10) );
CHECK(data.kVectors.cols() == 846);
CHECK(data.Qion.size() == data.kVectors.cols());
}
#endif
/** @brief recipe/policies for ion-ion Ewald summation */
template<class Tspace, bool eigenopt=false /** use Eigen matrix ops where possible */>
struct PolicyIonIon {
typedef typename Tspace::Tpvec::iterator iter;
Tspace *spc;
Tspace *old=nullptr; // set only if key==NEW at first call to `sync()`
PolicyIonIon(Tspace &spc) : spc(&spc) {}
void updateComplex(EwaldData &data) const {
if (eigenopt)
if (data.ipbc==false) {
auto pos = asEigenMatrix(spc->p.begin(), spc->p.end(), &Tspace::Tparticle::pos); // Nx3
auto charge = asEigenVector(spc->p.begin(), spc->p.end(), &Tspace::Tparticle::charge); // Nx1
Eigen::MatrixXd kr = pos.matrix() * data.kVectors; // Nx3 * 3xK = NxK
data.Qion.real() = (kr.array().cos().colwise()*charge).colwise().sum();
data.Qion.imag() = kr.array().sin().colwise().sum();
return;
}
for (int k=0; k<data.kVectors.cols(); k++) {
const Point& kv = data.kVectors.col(k);
EwaldData::Tcomplex Q(0,0);
if (data.ipbc)
for (auto &i : spc->p)
Q += kv.cwiseProduct(i.pos).array().cos().prod() * i.charge;
else
for (auto &i : spc->p) {
double dot = kv.dot(i.pos);
Q += i.charge * EwaldData::Tcomplex( std::cos(dot), std::sin(dot) );
}
data.Qion[k] = Q;
}
} //!< Update all k vectors
void updateComplex(EwaldData &data, iter begin, iter end) const {
assert(old!=nullptr);
assert(spc->p.size() == old->p.size());
size_t ibeg = std::distance(spc->p.begin(), begin); // index of first changed particle
size_t iend = std::distance(spc->p.begin(), end); // one past the last changed particle
for (int k=0; k<data.kVectors.cols(); k++) {
auto& Q = data.Qion[k];
Point q = data.kVectors.col(k);
if (data.ipbc)
for (size_t i=ibeg; i<iend; i++) {
Q += q.cwiseProduct( spc->p[i].pos ).array().cos().prod() * spc->p[i].charge;
Q -= q.cwiseProduct( old->p[i].pos ).array().cos().prod() * old->p[i].charge;
}
else
for (size_t i=ibeg; i<iend; i++) {
double _new = q.dot(spc->p[i].pos);
double _old = q.dot(old->p[i].pos);
Q += spc->p[i].charge * EwaldData::Tcomplex( std::cos(_new), std::sin(_new) );
Q -= old->p[i].charge * EwaldData::Tcomplex( std::cos(_old), std::sin(_old) );
}
}
} //!< Optimized update of the k-space sums for particles in [begin,end). Requires access to old positions through the `old` pointer
double selfEnergy(const EwaldData &d) {
double E = 0;
for (auto& i : spc->p)
E += i.charge * i.charge;
return -d.alpha*E / std::sqrt(pc::pi) * d.lB;
}
double surfaceEnergy(const EwaldData &d) {
if (d.const_inf < 0.5)
return 0;
Point qr(0,0,0);
for (auto &i : spc->p)
qr += i.charge*i.pos;
return d.const_inf * 2 * pc::pi / ( (2*d.eps_surf+1) * spc->geo.getVolume() ) * qr.dot(qr) * d.lB;
}
double reciprocalEnergy(const EwaldData &d) {
double E = 0;
if (eigenopt) // known at compile time
E = d.Aks.cwiseProduct( d.Qion.cwiseAbs2() ).sum();
else
for (size_t k=0; k<d.Qion.size(); k++)
E += d.Aks[k] * std::norm( d.Qion[k] );
return 2 * pc::pi / spc->geo.getVolume() * E * d.lB;
}
};
#ifdef DOCTEST_LIBRARY_INCLUDED
TEST_CASE("[Faunus] Ewald - IonIonPolicy")
{
using doctest::Approx;
typedef Space<Geometry::Cuboid, Particle<Charge,Dipole>> Tspace;
Tspace spc;
spc.p.resize(2);
spc.geo = R"( {"length": 10} )"_json;
spc.p[0] = R"( {"pos": [0,0,0], "q": 1.0} )"_json;
spc.p[1] = R"( {"pos": [1,0,0], "q": -1.0} )"_json;
PolicyIonIon<Tspace> ionion(spc);
EwaldData data = R"({
"epsr": 1.0, "alpha": 0.894427190999916, "epss": 1.0,
"kcutoff": 11.0, "spherical_sum": true, "cutoff": 5.0})"_json;
data.ipbc = false; // PBC Ewald (http://dx.doi.org/10.1063/1.481216)
data.update( spc.geo.getLength() );
ionion.updateComplex( data );
CHECK( ionion.selfEnergy(data) == Approx(-1.0092530088080642*data.lB) );
CHECK( ionion.surfaceEnergy(data) == Approx(0.0020943951023931952*data.lB) );
CHECK( ionion.reciprocalEnergy(data) == Approx(0.21303063979675319*data.lB) );
data.ipbc = true; // IPBC Ewald
data.update( spc.geo.getLength() );
ionion.updateComplex( data );
CHECK( ionion.selfEnergy(data) == Approx(-1.0092530088080642*data.lB) );
CHECK( ionion.surfaceEnergy(data) == Approx(0.0020943951023931952*data.lB) );
CHECK( ionion.reciprocalEnergy(data) == Approx(0.0865107467*data.lB) );
}
#endif
/** @brief Ewald summation reciprocal energy */
template<class Tspace, class Policy=PolicyIonIon<Tspace>>
class Ewald : public Energybase {
private:
EwaldData data;
Policy policy;
public:
Tspace& spc;
Ewald(const json &j, Tspace &spc) : spc(spc), policy(spc) {
name = "ewald";
data = j;
data.update( spc.geo.getLength() );
policy.updateComplex(data); // brute force. todo: be selective
}
double energy(Change &change) override {
double u=0;
if (!change.empty()) {
// If the state is NEW (trial state), then update all k-vectors
if (key==NEW) {
if (change.all || change.dV) { // everything changes
data.update( spc.geo.getLength() );
policy.updateComplex(data); // update all (expensive!)
}
else {
if (change.groups.size()==1) { // exactly one group is moved
auto& d = change.groups[0];
auto& g = spc.groups[d.index];
if (d.atoms.size()==1) // exactly one atom is moved; pass a half-open [begin,end) range
policy.updateComplex(data, g.begin()+d.atoms[0], g.begin()+d.atoms[0]+1);
else
policy.updateComplex(data, g.begin(), g.end());
} else
policy.updateComplex(data);
}
}
u = policy.selfEnergy(data) + policy.surfaceEnergy(data) + policy.reciprocalEnergy(data);
}
return u;
}
void sync(Energybase *basePtr, Change &change) override {
auto other = dynamic_cast<decltype(this)>(basePtr);
assert(other);
if (other->key==OLD)
policy.old = &(other->spc); // give NEW access to OLD space for optimized updates
data = other->data; // copy everything!
} //!< Called after a move is rejected/accepted as well as before simulation
void to_json(json &j) const override {
j = data;
}
};
template<typename Tspace>
class Isobaric : public Energybase {
private:
Tspace& spc;
double P; // P/kT
public:
Isobaric(const json &j, Tspace &spc) : spc(spc) {
name = "isobaric";
cite = "Frenkel & Smith 2nd Ed (Eq. 5.4.13)";
P = j.value("P/mM", 0.0) * 1.0_mM;
if (P<1e-10) {
P = j.value("P/Pa", 0.0) * 1.0_Pa;
if (P<1e-10)
P = j.at("P/atm").get<double>() * 1.0_atm;
}
}
double energy(Change &change) override {
if (change.dV || change.all) {
double V = spc.geo.getVolume();
size_t N=0;
for (auto &g : spc.groups)
if (!g.empty()) {
if (g.atomic)
N += g.size();
else
N++;
}
return P*V-(N+1)*std::log(V);
} else return 0;
}
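// i.e. the NPT configurational weight beta*U = P*V - (N+1)*ln(V) with P given in
// units of kT, where N counts free atoms plus rigid molecular groups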
void to_json(json &j) const override {
j["P/atm"] = P / 1.0_atm;
j["P/mM"] = P / 1.0_mM;
j["P/Pa"] = P / 1.0_Pa;
_roundjson(j,5);
}
};
template<typename Tspace>
class ExternalPotential : public Energybase {
protected:
typedef typename Tspace::Tpvec Tpvec;
typedef typename Tspace::Tparticle Tparticle;
bool COM=false; // apply on center-of-mass
Tspace& spc;
std::set<int> molids; // molecules to act upon
std::function<double(const Tparticle&)> func=nullptr; // energy of single particle
std::vector<std::string> _names;
template<class Tparticle>
double _energy(const Group<Tparticle> &g) const {
double u=0;
if (molids.find(g.id)!=molids.end()) {
if (COM) { // apply only to center of mass
Tparticle dummy;
dummy.pos = g.cm;
u = func(dummy);
} else {
for (auto &p : g) {
u += func(p);
if (std::isnan(u))
break;
}
}
}
return u;
} //!< External potential on a single particle
public:
ExternalPotential(const json &j, Tspace &spc) : spc(spc) {
name="external";
COM = j.value("com", false);
_names = j.at("molecules").get<decltype(_names)>(); // molecule names
auto _ids = names2ids(molecules<Tpvec>, _names); // names --> molids
molids = std::set<int>(_ids.begin(), _ids.end()); // vector --> set
if (molids.empty() || molids.size()!=_names.size() )
throw std::runtime_error(name + ": molecule list is empty");
}
double energy(Change &change) override {
assert(func!=nullptr);
double u=0;
if (change.dV || change.all) {
for (auto &g : spc.groups) { // check all groups
u += _energy(g);
if (std::isnan(u))
break;
}
} else
for (auto &d : change.groups) {
auto &g = spc.groups.at(d.index); // check specified groups
if (d.all || COM) // check all atoms in group
u += _energy(g);
else { // check only specified atoms in group
if (molids.find(g.id)!=molids.end())
for (auto i : d.atoms)
u += func( *(g.begin()+i) );
}
if (std::isnan(u))
break;
}
return u;
}
void to_json(json &j) const override {
j["molecules"] = _names;
j["com"] = COM;
}
}; //!< Base class for external potentials, acting on particles
template<typename Tspace, typename base=ExternalPotential<Tspace>>
class Confine : public base {
public:
enum Variant {sphere, cylinder, cuboid, none};
Variant type=none;
private:
Point origo={0,0,0}, dir={1,1,1};
Point low, high;
double radius, k;
bool scale=false;
std::map<std::string, Variant> m = {
{"sphere", sphere}, {"cylinder", cylinder}, {"cuboid", cuboid}
};
public:
Confine(const json &j, Tspace &spc) : base(j,spc) {
base::name = "confine";
k = value_inf(j, "k") * 1.0_kJmol; // get floating point; allow inf/-inf
type = m.at( j.at("type") );
if (type==sphere || type==cylinder) {
radius = j.at("radius");
origo = j.value("origo", origo);
scale = j.value("scale", scale);
if (type==cylinder)
dir = {1,1,0};
base::func = [&radius=radius, origo=origo, k=k, dir=dir](const typename base::Tparticle &p) {
double d2 = (origo-p.pos).cwiseProduct(dir).squaredNorm() - radius*radius;
if (d2>0)
return 0.5*k*d2;
return 0.0;
};
// If volume is scaled, also scale the confining radius by adding a trigger
// to `Space::scaleVolume()`
if (scale)
spc.scaleVolumeTriggers.push_back( [&radius=radius](Tspace &spc, double Vold, double Vnew) {
radius *= std::cbrt(Vnew/Vold); } );
}
if (type==cuboid) {
low = j.at("low").get<Point>();
high = j.at("high").get<Point>();
base::func = [low=low, high=high, k=k](const typename base::Tparticle &p) {
double u=0;
Point d = low-p.pos;
for (int i=0; i<3; ++i)
if (d[i]>0) u+=d[i]*d[i];
d = p.pos-high;
for (int i=0; i<3; ++i)
if (d[i]>0) u+=d[i]*d[i];
return 0.5*k*u;
};
}
}
void to_json(json &j) const override {
if (type==cuboid)
j = {{"low", low}, {"high", high}};
if (type==sphere || type==cylinder)
j = {{"radius", radius}};
if (type==sphere) {
j["origo"] = origo;
j["scale"] = scale;
}
for (auto &i : m)
if (i.second==type)
j["type"] = i.first;
j["k"] = k/1.0_kJmol;
base::to_json(j);
_roundjson(j,5);
}
}; //!< Confine particles to a sub-region of the simulation container
/*
* The keys of the `intra` map are group indices and the values
* are vectors of `BondData`. For bonds between groups, fill
* in `inter`, which is evaluated on every call to
* `energy`.
*
* @todo Optimize.
*/
template<typename Tspace>
class Bonded : public Energybase {
private:
Tspace& spc;
typedef typename Tspace::Tpvec Tpvec;
typedef std::vector<Potential::BondData> BondVector;
BondVector inter; // inter-molecular bonds
std::map<int,BondVector> intra; // intra-molecular bonds
void update() {
intra.clear();
for (size_t i=0; i<spc.groups.size(); i++) {
if (!spc.groups[i].empty()) {
auto &g = spc.groups[i];
intra[i] = molecules<Tpvec>.at(g.id).bonds;
for (auto &b : intra[i])
b.shift( std::distance(spc.p.begin(), g.begin()) );
}
}
} // finds and adds all intra-molecular bonds of active molecules
double sum( const BondVector &v ) const {
double u=0;
for (auto &b : v)
u += b.energy(spc.p, spc.geo.distanceFunc);
return u;
} // sum energy in vector of BondData
public:
Bonded(const json &j, Tspace &spc) : spc(spc) {
name = "bonded";
update();
if (j.is_object())
if (j.count("bondlist")==1)
inter = j["bondlist"].get<BondVector>();
}
void to_json(json &j) const override {
if (!inter.empty())
j["bondlist"] = inter;
if (!intra.empty()) {
json& _j = j["bondlist-intramolecular"];
_j = json::array();
for (auto &i : intra)
for (auto &b : i.second)
_j.push_back(b);
}
}
double energy(Change &c) override {
double u=0;
if ( !c.empty() ) {
u = sum(inter); // energy of inter-molecular bonds
if ( c.all || c.dV ) {
for (auto& i : intra) // energy of intra-molecular bonds
if (!spc.groups[i.first].empty()) // add only if group is active
u += sum(i.second);
} else
for (auto &d : c.groups)
if (d.internal)
u += sum( intra[d.index] );
}
return u;
} // brute force -- refine this!
};
/**
* @brief Nonbonded energy using a pair-potential
*/
template<typename Tspace, typename Tpairpot>
class Nonbonded : public Energybase {
private:
double g2gcnt=0, g2gskip=0;
protected:
typedef typename Tspace::Tgroup Tgroup;
double Rc2_g2g=pc::infty;
void to_json(json &j) const override {
j["pairpot"] = pairpot;
j["cutoff_g2g"] = std::sqrt(Rc2_g2g);
}
template<typename T>
inline bool cut(const T &g1, const T &g2) {
g2gcnt++;
if (g1.atomic || g2.atomic)
return false;
if ( spc.geo.sqdist(g1.cm, g2.cm)<Rc2_g2g )
return false;
g2gskip++;
return true;
} //!< true if group<->group interaction can be skipped
template<typename T>
inline double i2i(const T &a, const T &b) {
assert(&a!=&b && "a and b cannot be the same particle");
return pairpot(a, b, spc.geo.vdist(a.pos, b.pos));
}
/*
* Internal energy of a group, calculated all-with-all or, if `index`
* is given, only for a subset. `index` lists the internal indices (starting
* at zero) of the changed particles within the group.
*/
double g_internal(const Tgroup &g, const std::vector<int> &index=std::vector<int>()) {
using namespace ranges;
double u=0;
if (index.empty()) // assume that all atoms have changed
for ( auto i = g.begin(); i != g.end(); ++i )
for ( auto j=i; ++j != g.end(); )
u += i2i(*i, *j);
else { // only a subset have changed
auto fixed = view::ints( 0, int(g.size()) )
| view::remove_if(
[&index](int i){return std::binary_search(index.begin(), index.end(), i);});
for (int i : index) // moved<->static
for (int j : fixed)
u += i2i( *(g.begin()+i), *(g.begin()+j));
for (int i : index) // moved<->moved
for (int j : index)
if (j>i)
u += i2i( *(g.begin()+i), *(g.begin()+j));
}
return u;
}
/*
* Calculates the interaction energy of a particle `i` with all other
* particles, handling both the case where `i` is already part of Space
* and the case where it is external to it.
*/
double i2all(const typename Tspace::Tparticle &i) {
double u=0;
auto it = spc.findGroupContaining(i); // iterator to group
if (it!=spc.groups.end()) { // check if i belongs to group in space
for (auto &g : spc.groups) // i with all other particles
if (&g!=&(*it)) // avoid self-interaction
if (!cut(g, *it)) // check g2g cut-off
for (auto &j : g) // loop over particles in other group
u += i2i(i,j);
for (auto &j : *it) // i with all particles in own group
if (&j!=&i)
u += i2i(i,j);
} else // particle does not belong to any group
for (auto &g : spc.groups) // i with all other *active* particles
for (auto &j : g) // (this will include only active particles)
u += i2i(i,j);
return u;
}
/*
* Group-to-group energy. A subset of `g1` can be given with `index` which refers
* to the internal index (starting at zero) of the first group, `g1`.
*/
virtual double g2g(const Tgroup &g1, const Tgroup &g2, const std::vector<int> &index=std::vector<int>()) {
double u = 0;
if (!cut(g1,g2)) {
if (index.empty()) // if index is empty, assume all in g1 have changed
for (auto &i : g1)
for (auto &j : g2)
u += i2i(i,j);
else // only a subset of g1
for (auto i : index)
for (auto &j : g2)
u += i2i( *(g1.begin()+i), j);
}
return u;
}
public:
Tspace& spc; //!< Space to operate on
Tpairpot pairpot; //!< Pair potential
Nonbonded(const json &j, Tspace &spc) : spc(spc) {
name="nonbonded";
pairpot = j;
Rc2_g2g = std::pow( j.value("cutoff_g2g", pc::infty), 2);
}
double energy(Change &change) override {
using namespace ranges;
double u=0;
if (!change.empty()) {
if (change.dV) {
#pragma omp parallel for reduction (+:u) schedule (dynamic)
for ( auto i = spc.groups.begin(); i < spc.groups.end(); ++i ) {
for ( auto j=i; ++j != spc.groups.end(); )
u += g2g( *i, *j );
if (i->atomic)
u += g_internal(*i);
}
return u;
}
// did everything change?
if (change.all) {
#pragma omp parallel for reduction (+:u) schedule (dynamic)
for ( auto i = spc.groups.begin(); i < spc.groups.end(); ++i ) {
for ( auto j=i; ++j != spc.groups.end(); )
u += g2g( *i, *j );
u += g_internal(*i);
}
// more todo here...
return u;
}
// if exactly ONE molecule is changed
if (change.groups.size()==1) {
auto& d = change.groups[0];
auto gindex = spc.groups.at(d.index).to_index(spc.p.begin()).first;
if (d.atoms.size()==1) // exactly one atom has moved
return i2all(spc.p.at(gindex+d.atoms[0]));
auto& g1 = spc.groups.at(d.index);
for (auto &g2 : spc.groups)
if (&g1 != &g2)
u += g2g(g1, g2, d.atoms);
if (d.internal)
u += g_internal(g1, d.atoms);
return u;
}
auto moved = change.touchedGroupIndex(); // index of moved groups
auto fixed = view::ints( 0, int(spc.groups.size()) )
| view::remove_if(
[&moved](int i){return std::binary_search(moved.begin(), moved.end(), i);}
); // index of static groups
// moved<->moved
for ( auto i = moved.begin(); i != moved.end(); ++i )
for ( auto j=i; ++j != moved.end(); )
u += g2g( spc.groups[*i], spc.groups[*j] );
// moved<->static
for ( auto i : moved)
for ( auto j : fixed)
u += g2g(spc.groups[i], spc.groups[j]);
// more todo!
}
return u;
}
}; //!< Nonbonded, pair-wise additive energy term
template<typename Tspace, typename Tpairpot>
class NonbondedCached : public Nonbonded<Tspace,Tpairpot> {
private:
typedef Nonbonded<Tspace,Tpairpot> base;
typedef typename Tspace::Tgroup Tgroup;
Eigen::MatrixXf cache;
double g2g(const Tgroup &g1, const Tgroup &g2, const std::vector<int> &index=std::vector<int>()) override {
int i = &g1 - &base::spc.groups.front();
int j = &g2 - &base::spc.groups.front();
if (j<i)
std::swap(i,j);
if (base::key==Energybase::NEW) { // if this is from the trial system,
double u = 0;
if (!base::cut(g1,g2)) {
for (auto &pi : g1) // renamed from i,j to avoid shadowing the matrix indices above
for (auto &pj : g2)
u += base::i2i(pi,pj);
}
cache(i,j) = u;
}
return cache(i,j); // return (cached) value
}
public:
NonbondedCached(const json &j, Tspace &spc) : base(j,spc) {
base::name += "EM";
cache.resize( spc.groups.size(), spc.groups.size() );
cache.setZero();
for ( auto i = base::spc.groups.begin(); i < base::spc.groups.end(); ++i ) {
for ( auto j=i; ++j != base::spc.groups.end(); ) {
int k = &(*i) - &base::spc.groups.front();
int l = &(*j) - &base::spc.groups.front();
if (l<k)
std::swap(k,l);
double u = 0;
if (!base::cut(*i,*j)) {
for (auto &k : *i)
for (auto &l : *j)
u += base::i2i(k,l);
}
cache(k,l) = u;
}
}
}
double energy(Change &change) override {
using namespace ranges;
double u=0;
if (!change.empty()) {
if (change.all || change.dV) {
#pragma omp parallel for reduction (+:u) schedule (dynamic)
for ( auto i = base::spc.groups.begin(); i < base::spc.groups.end(); ++i ) {
for ( auto j=i; ++j != base::spc.groups.end(); )
u += g2g( *i, *j );
}
return u;
}
// if exactly ONE molecule is changed
if (change.groups.size()==1) {
auto& d = change.groups[0];
auto& g1 = base::spc.groups.at(d.index);
for (auto &g2 : base::spc.groups) {
if (&g1 != &g2)
u += g2g(g1, g2, d.atoms);
}
return u;
}
auto moved = change.touchedGroupIndex(); // index of moved groups
auto fixed = view::ints( 0, int(base::spc.groups.size()) )
| view::remove_if(
[&moved](int i){return std::binary_search(moved.begin(), moved.end(), i);}
); // index of static groups
// moved<->moved
for ( auto i = moved.begin(); i != moved.end(); ++i )
for ( auto j=i; ++j != moved.end(); ) {
u += g2g( base::spc.groups[*i], base::spc.groups[*j] );
}
// moved<->static
for ( auto i : moved)
for ( auto j : fixed)
u += g2g(base::spc.groups[i], base::spc.groups[j]);
// more todo!
}
return u;
}
void sync(Energybase *basePtr, Change &change) override {
auto other = dynamic_cast<decltype(this)>(basePtr);
assert(other);
if (change.all || change.dV)
cache.triangularView<Eigen::StrictlyUpper>() = (other->cache).template triangularView<Eigen::StrictlyUpper>();
else
for (auto &d : change.groups) {
for (size_t i=0; i<d.index; i++)
cache(i,d.index) = other->cache(i,d.index);
for (size_t i=d.index+1; i<base::spc.groups.size(); i++)
cache(d.index,i) = other->cache(d.index,i);
}
} //!< Copy energy matrix from other
}; //!< Nonbonded with cached energies (Energy Matrix)
/**
* `udelta` is the accumulated energy change caused by updating the penalty
* function. If not accounted for, this appears as an energy drift (which it
* is!). To compensate, `udelta` is subtracted from the returned energy (when
* `nodrift` is set); since it is the same in both the trial and old state
* energies it does not affect MC move acceptance.
*/
template<typename Tspace>
class Penalty : public Energybase {
protected:
typedef typename Tspace::Tparticle Tparticle;
typedef typename Tspace::Tgroup Tgroup;
typedef typename Tspace::Tpvec Tpvec;
typedef typename std::shared_ptr<ReactionCoordinate::ReactionCoordinateBase> Tcoord;
Tspace &spc;
bool nodrift;
bool quiet;
size_t dim=0;
size_t cnt=0; // number of calls to `sync()`
size_t nupdate; // update frequency [steps]
size_t samplings;
size_t nconv=0;
double udelta=0; // total energy change of updating penalty function
double scale; // scaling factor for f0
double f0; // penalty increment
std::string file, hisfile;
std::vector<Tcoord> rcvec; // vector of reaction coordinate functions
std::vector<double> coord; // latest reaction coordinate
Table<int> histo;
Table<double> penalty;
public:
Penalty(const json &j, Tspace &spc) : spc(spc) {
using namespace ReactionCoordinate;
name = "penalty";
f0 = j.value("f0", 0.5);
scale = j.value("scale", 0.8);
quiet = j.value("quiet", true);
nupdate = j.value("update", 0);
samplings = j.value("samplings", 1);
nodrift = j.value("nodrift", true);
file = j.at("file").get<std::string>();
hisfile = j.value("histogram", "penalty-histogram.dat");
std::vector<double> binwidth, min, max;
if (scale<0 || scale>1)
throw std::runtime_error("`scale` must be in the interval [0:1]");
for (auto &i : j.at("coords"))
if (i.is_object())
if (i.size()==1) {
std::shared_ptr<ReactionCoordinate::ReactionCoordinateBase> rc=nullptr;
for (auto it=i.begin(); it!=i.end(); ++it) {
if (it.key()=="atom")
rc = std::make_shared<AtomProperty>(it.value(), spc);
if (it.key()=="system")
rc = std::make_shared<SystemProperty>(it.value(), spc);
if (it.key()=="cmcm")
rc = std::make_shared<MassCenterSeparation>(it.value(), spc);
if (it.key()=="angle")
rc = std::make_shared<PrincipalAxisAngle>(it.value(), spc);
if (rc!=nullptr) {
if (rc->min>=rc->max || rc->binwidth<=0)
throw std::runtime_error("min<max and binwidth>0 required for '" + it.key() + "'");
rcvec.push_back(rc);
binwidth.push_back( rc->binwidth );
min.push_back( rc->min );
max.push_back( rc->max );
} else
throw std::runtime_error("unknown coordinate type '" + it.key() + "'");
}
}
dim = binwidth.size();
if (dim<1 || dim>2)
throw std::runtime_error("minimum one maximum two coordinates required");
coord.resize(2,0);
histo.reInitializer(binwidth, min, max);
penalty.reInitializer(binwidth, min, max);
std::ifstream f(MPI::prefix+file);
if (f) {
cout << "Loading penalty function '" << MPI::prefix+file << "'" << endl;
std::string hash;
f >> hash >> f0 >> samplings;
cout << "f0 " << f0 << " samplings " << samplings << endl;
for (int row=0; row<penalty.rows(); row++)
for (int col=0; col<penalty.cols(); col++)
if (!f.eof())
f >> penalty(row,col);
else
throw std::runtime_error("penalty file dimension mismatch");
cout << "maxCoeff " << penalty.maxCoeff() << endl;
}
}
virtual ~Penalty() {
std::ofstream f1(MPI::prefix + file), f2(MPI::prefix + hisfile);
if (f1) f1 << "# " << f0 << " " << samplings << "\n" << penalty.array() - penalty.minCoeff() << endl;
if (f2) f2 << histo << endl;
cout << "nconv is " << nconv << endl;
// add function to save to numpy-friendly file...
}
void to_json(json &j) const override {
j["file"] = file;
j["scale"] = scale;
j["update"] = nupdate;
j["nodrift"] = nodrift;
j["histogram"] = hisfile;
j["f0_final"] = f0;
j["nconv"] = nconv;
auto& _j = j["coords"] = json::array();
for (auto rc : rcvec) {
json t;
t[rc->name] = *rc;
_j.push_back(t);
}
}
double energy(Change &change) override {
assert(rcvec.size()<=coord.size());
double u=0;
coord.resize( rcvec.size() );
if (!change.empty()) {
for (size_t i=0; i<rcvec.size(); i++) {
coord.at(i) = rcvec[i]->operator()();
if (!rcvec[i]->inRange(coord[i]))
return pc::infty;
}
penalty.to_index(coord);
u = penalty[coord];
}
return (nodrift) ? u - udelta : u;
}
virtual void update(const std::vector<double> &c) {
if (++cnt % nupdate == 0 && f0>0) {
bool b = histo.minCoeff() >= samplings;
if (b && f0>0) {
double min = penalty.minCoeff();
penalty = penalty.array() - min;
if (!quiet)
cout << "Barriers/kT. Penalty=" << penalty.maxCoeff()
<< " Histogram=" << std::log(double(histo.maxCoeff())/histo.minCoeff())
<< endl;
f0 = f0 * scale; // reduce penalty energy
samplings = std::ceil( samplings / scale );
histo.setZero();
udelta += -min;
nconv += 1;
}
}
coord = c;
histo[coord]++;
penalty[coord] += f0;
udelta += f0;
}
void sync(Energybase *basePtr, Change &change) override {
auto other = dynamic_cast<decltype(this)>(basePtr);
assert(other);
update(other->coord);
other->update(other->coord);
} // @todo: this doubles the MPI communication
};
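// Summary of the update rule implemented above (a Wang-Landau-style scheme, as far
// as the code suggests): every `nupdate` calls, if all histogram bins hold at least
// `samplings` counts, the penalty surface is shifted to zero minimum, f0 is reduced
// by `scale`, `samplings` is rescaled by 1/scale and the histogram is reset; every
// visit adds f0 to the penalty at the current reaction coordinate.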
#ifdef ENABLE_MPI
template<typename Tspace, typename Base=Penalty<Tspace>>
struct PenaltyMPI : public Base {
using Base::samplings;
using Base::penalty;
using Base::udelta;
using Base::scale;
using Base::histo;
using Base::coord;
using Base::cnt;
using Base::f0;
using Base::file;
using Base::hisfile;
using Base::nconv;
Eigen::VectorXi weights; // array w. minimum histogram counts
Eigen::VectorXd buffer; // receive buffer for penalty functions
PenaltyMPI(const json &j, Tspace &spc) : Base(j,spc) {
weights.resize( MPI::mpi.nproc() );
buffer.resize( penalty.size()*MPI::mpi.nproc() );
}
void update(const std::vector<double> &c) override {
using namespace Faunus::MPI;
double uold = penalty[c];
if (++cnt % this->nupdate == 0 && f0>0) {
int min = histo.minCoeff();
MPI_Barrier(mpi.comm);
MPI_Allgather(&min, 1, MPI_INT, weights.data(), 1, MPI_INT, mpi.comm);
if ( weights.maxCoeff() > samplings ) {
MPI_Gather(penalty.data(), penalty.size(), MPI_DOUBLE,
buffer.data(), penalty.size(), MPI_DOUBLE, 0, mpi.comm);
if (mpi.isMaster()) {
penalty.setZero();
for (int i=0; i<mpi.nproc(); i++)
penalty += Eigen::Map<Eigen::MatrixXd>(
buffer.data()+i*penalty.size(), penalty.rows(), penalty.cols() )
/ double(mpi.nproc());
penalty = penalty.array() - penalty.minCoeff();
}
MPI_Bcast(penalty.data(), penalty.size(), MPI_DOUBLE, 0, mpi.comm);
nconv += 1;
std::ofstream f3(MPI::prefix + std::to_string(nconv) + file);
if (f3) f3 << "# " << f0 << " " << samplings << "\n" << penalty.array() << endl;
std::ofstream f4(MPI::prefix + std::to_string(nconv) + hisfile);
if (f4) f4 << histo << endl;
if (min>0 && !this->quiet)
cout << "Barriers/kT. Penalty=" << penalty.maxCoeff()
<< " Histogram=" << std::log(double(histo.maxCoeff())/histo.minCoeff()) << endl;
histo.setZero();
f0 = f0 * scale; // reduce penalty energy
samplings = std::ceil( samplings / scale );
}
}
coord = c;
histo[coord]++;
penalty[coord] += f0;
udelta += penalty[coord] - uold;
} //!< Average penalty function across all nodes
}; //!< Penalty function with MPI exchange
#endif
#ifdef FAU_POWERSASA
template<class Tspace>
class SASAEnergy : public Energybase {
typedef typename Tspace::Tparticle Tparticle;
typedef typename Tspace::Tpvec Tpvec;
Tspace& spc;
std::vector<float> sasa, radii;
std::vector<Point> coords;
double probe; // sasa probe radius (angstrom)
double conc=0;// co-solute concentration (mol/l)
Average<double> avgArea; // average surface area
std::shared_ptr<POWERSASA::PowerSasa<float,Point>> ps;
void updateSASA(const Tpvec &p) {
radii.resize(p.size());
coords.resize(p.size());
std::transform(p.begin(), p.end(), coords.begin(), [](auto &a){ return a.pos;});
std::transform(p.begin(), p.end(), radii.begin(),
[this](auto &a){ return atoms<Tparticle>[a.id].sigma*0.5 + this->probe;});
ps->update_coords(coords, radii); // slowest step!
for (size_t i=0; i<p.size(); i++) {
auto &a = atoms<Tparticle>[p[i].id];
if (std::fabs(a.tfe)>1e-9 || std::fabs(a.tension)>1e-9)
ps->calc_sasa_single(i);
}
sasa = ps->getSasa();
assert(sasa.size()==p.size());
}
void to_json(json &j) const override {
using namespace u8;
j["molarity"] = conc / 1.0_molar;
j["radius"] = probe / 1.0_angstrom;
j[bracket("SASA")+"/"+angstrom+squared] = avgArea.avg() / 1.0_angstrom;
_roundjson(j,5);
}
public:
SASAEnergy(const json &j, Tspace &spc) : spc(spc) {
name = "sasa";
cite = "doi:10.1002/jcc.21844";
probe = j.value("radius", 1.4) * 1.0_angstrom;
conc = j.value("molarity", conc) * 1.0_molar;
radii.resize(spc.p.size());
coords.resize(spc.p.size());
std::transform(spc.p.begin(), spc.p.end(), coords.begin(), [](auto &a){ return a.pos;});
std::transform(spc.p.begin(), spc.p.end(), radii.begin(),
[this](auto &a){ return atoms<Tparticle>[a.id].sigma*0.5 + this->probe;});
ps = std::make_shared<POWERSASA::PowerSasa<float,Point>>(coords,radii);
}
double energy(Change &change) override {
double u=0, A=0;
updateSASA(spc.p);
for (size_t i=0; i<sasa.size(); ++i) {
auto &a = atoms<Tparticle>[ spc.p[i].id ];
u += sasa[i] * (a.tension + conc * a.tfe);
A += sasa[i];
}
avgArea+=A; // sample average area for accepted confs. only
return u;
}
}; //!< SASA energy from transfer free energies
#endif
struct Example2D : public Energybase {
Point& i; // reference to 1st particle in the system
template<typename Tspace>
Example2D(const json &j, Tspace &spc): i(spc.p.at(0).pos) { name = "Example2D"; }
double energy(Change &change) override {
double s=1+std::sin(2*pc::pi*i.x())+std::cos(2*pc::pi*i.y());
if (i.x()>=-2.00 && i.x()<=-1.25) return 1*s;
if (i.x()>=-1.25 && i.x()<=-0.25) return 2*s;
if (i.x()>=-0.25 && i.x()<= 0.75) return 3*s;
if (i.x()>= 0.75 && i.x()<= 1.75) return 4*s;
if (i.x()>= 1.75 && i.x()<= 2.00) return 5*s;
return 1e10;
}
};
template<typename Tspace>
class Hamiltonian : public Energybase, public BasePointerVector<Energybase> {
protected:
typedef typename Tspace::Tparticle Tparticle;
void to_json(json &j) const override {
for (auto i : this->vec)
j.push_back(*i);
}
void addEwald(const json &j, Tspace &spc) {
if (j.count("coulomb")==1)
if (j["coulomb"].at("type")=="ewald")
push_back<Energy::Ewald<Tspace>>(j["coulomb"], spc);
} //!< Adds an instance of reciprocal space Ewald energies (if appropriate)
public:
Hamiltonian(Tspace &spc, const json &j) {
using namespace Potential;
typedef CombinedPairPotential<CoulombGalore,LennardJones<Tparticle>> CoulombLJ;
typedef CombinedPairPotential<CoulombGalore,HardSphere<Tparticle>> CoulombHS;
typedef CombinedPairPotential<CoulombGalore,WeeksChandlerAndersen<Tparticle>> CoulombWCA;
typedef CombinedPairPotential<Coulomb,WeeksChandlerAndersen<Tparticle>> PrimitiveModelWCA;
Energybase::name="hamiltonian";
for (auto &m : j.at("energy")) {// loop over move list
size_t oldsize = vec.size();
for (auto it=m.begin(); it!=m.end(); ++it) {
try {
if (it.key()=="nonbonded_coulomblj")
push_back<Energy::Nonbonded<Tspace,CoulombLJ>>(it.value(), spc);
if (it.key()=="nonbonded")
push_back<Energy::Nonbonded<Tspace,FunctorPotential<typename Tspace::Tparticle>>>(it.value(), spc);
if (it.key()=="nonbonded_coulombhs")
push_back<Energy::Nonbonded<Tspace,CoulombHS>>(it.value(), spc);
if (it.key()=="nonbonded_coulombwca")
push_back<Energy::Nonbonded<Tspace,CoulombWCA>>(it.value(), spc);
if (it.key()=="nonbonded_pmwca")
push_back<Energy::Nonbonded<Tspace,PrimitiveModelWCA>>(it.value(), spc);
if (it.key()=="nonbonded_deserno")
push_back<Energy::NonbondedCached<Tspace,DesernoMembrane<typename Tspace::Tparticle>>>(it.value(), spc);
if (it.key()=="nonbonded_desernoAA")
push_back<Energy::NonbondedCached<Tspace,DesernoMembraneAA<typename Tspace::Tparticle>>>(it.value(), spc);
if (it.key()=="bonded")
push_back<Energy::Bonded<Tspace>>(it.value(), spc);
if (it.key()=="confine")
push_back<Energy::Confine<Tspace>>(it.value(), spc);
if (it.key()=="example2d")
push_back<Energy::Example2D>(it.value(), spc);
if (it.key()=="isobaric")
push_back<Energy::Isobaric<Tspace>>(it.value(), spc);
if (it.key()=="penalty")
#ifdef ENABLE_MPI
push_back<Energy::PenaltyMPI<Tspace>>(it.value(), spc);
#else
push_back<Energy::Penalty<Tspace>>(it.value(), spc);
#endif
#ifdef ENABLE_POWERSASA
if (it.key()=="sasa")
push_back<Energy::SASAEnergy<Tspace>>(it.value(), spc);
#endif
// additional energies go here...
addEwald(it.value(), spc); // add reciprocal Ewald terms if appropriate
if (vec.size()==oldsize)
std::cerr << "warning: ignoring unknown energy '" << it.key() << "'" << endl;
} catch (std::exception &e) {
throw std::runtime_error("Error adding energy '" + it.key() + "': " + e.what());
}
}
}
}
double energy(Change &change) override {
double du=0;
for (auto i : this->vec) {
i->key=key;
du += i->energy(change);
}
return du;
} //!< Energy due to changes
void sync(Hamiltonian &other, Change &change) {
assert(other.size()==size());
for (size_t i=0; i<size(); i++)
this->vec[i]->sync( other.vec[i].get(), change);
}
}; //!< Aggregates and sum energy terms
}//namespace
}//namespace
|
kvstore_dist_server.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file mxnet_node.h
* \brief implement mxnet nodes
*/
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <mxnet/c_api.h>
#include <mxnet/kvstore.h>
#include <ps/ps.h>
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "../profiler/profiler.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"
namespace mxnet {
namespace kvstore {
// maintain same order in frontend.
enum class CommandType {
kController, kSetMultiPrecision, kStopServer, kSyncMode,
kSetGradientCompression, kSetProfilerParams
};
enum class RequestType {
kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull
};
struct DataHandleType {
RequestType requestType;
int dtype;
};
/*!
* Uses Cantor pairing function to generate a unique number given two numbers.
* This number can also be inverted to find the unique pair whose Cantor value is this number.
* Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function
* \param requestType RequestType
* \param dtype integer
* \return Cantor value of arguments
*/
static int GetCommandType(RequestType requestType, int dtype) {
int m = static_cast<int>(requestType);
return (((m + dtype) * (m + dtype + 1)) / 2) + dtype;
}
/*!
* Unpairs Cantor value and finds the two integers used to pair.
* Then returns DataHandleType object with those numbers.
* \param cmd DataHandleCommand generated by GetCommandType function
* \return DataHandleType
*/
static DataHandleType DepairDataHandleType(int cmd) {
int w = std::floor((std::sqrt(8 * cmd + 1) - 1)/2);
int t = ((w * w) + w) / 2;
int y = cmd - t;
int x = w - y;
CHECK_GE(x, 0);
CHECK_GE(y, 0);
DataHandleType type;
type.requestType = static_cast<RequestType>(x);
type.dtype = y;
return type;
}
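// Round-trip sketch (illustrative only; the dtype value 2 is an arbitrary
// choice): pairing with GetCommandType and unpairing with
// DepairDataHandleType must recover the original pair.
//
//   int cmd = GetCommandType(RequestType::kRowSparsePushPull, 2);
//   DataHandleType t = DepairDataHandleType(cmd);
//   CHECK(t.requestType == RequestType::kRowSparsePushPull);
//   CHECK_EQ(t.dtype, 2);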
/**
* \brief executor that runs functions on the thread which called \ref Start
*/
class Executor {
public:
/**
* \brief start the executor
*/
void Start() {
std::unique_lock<std::mutex> lk(mu_);
while (true) {
cond_.wait(lk, [this]{return !queue_.empty();});
Block blk = std::move(queue_.front());
queue_.pop();
lk.unlock();
if (blk.f) {
blk.f();
blk.p->set_value();
} else {
blk.p->set_value(); break;
}
lk.lock();
}
}
/**
* \brief function
*/
typedef std::function<void()> Func;
/**
* \brief let the thread that called \ref Start execute a function; thread-safe
*/
void Exec(const Func& func) {
Block blk(func);
auto fut = blk.p->get_future();
{
std::lock_guard<std::mutex> lk(mu_);
queue_.push(std::move(blk));
cond_.notify_one();
}
fut.wait();
}
/**
* \brief stop the thread, threadsafe
*/
void Stop() {
Exec(Func());
}
private:
struct Block {
explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
Func f;
std::shared_ptr<std::promise<void>> p;
};
std::queue<Block> queue_;
std::mutex mu_;
std::condition_variable cond_;
};
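// Usage sketch (an illustration, not from the source; assumes <thread> is
// available): one thread runs the loop in Start(), other threads submit
// work through Exec(), and Stop() makes Start() return.
//
//   Executor exec;
//   std::thread worker([&exec]() { exec.Start(); });
//   exec.Exec([]() { LOG(INFO) << "runs on the worker thread"; });
//   exec.Stop();
//   worker.join();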
class KVStoreDistServer {
public:
KVStoreDistServer() {
using namespace std::placeholders;
ps_server_ = new ps::KVServer<char>(0);
static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
ps_server_->set_request_handle(
std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
sync_mode_ = false;
gradient_compression_ = std::make_shared<GradientCompression>();
log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
multi_precision_ = false; // explicitly initialize; flipped by kSetMultiPrecision
}
~KVStoreDistServer() {
profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0));
delete ps_server_;
}
void set_controller(const KVStore::Controller& controller) {
CHECK(controller);
controller_ = controller;
}
void set_updater(const KVStore::Updater& updater) {
CHECK(updater);
updater_ = updater;
}
/**
* \brief blocks until the command \a kStopServer is received
*/
void Run() {
exec_.Start();
}
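// Server-side usage sketch (hedged; the exact wiring is owned by the
// embedding application and the names below are illustrative):
//
//   KVStoreDistServer server;
//   server.set_controller(controller);  // runs frontend commands
//   server.set_updater(updater);        // applies pushed values
//   server.Run();                       // blocks until kStopServer arrives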
private:
struct UpdateBuf {
std::vector<ps::KVMeta> request;
NDArray merged;
// temp_array is used to cast received values to float32 for computation, if required
NDArray temp_array;
};
void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
CommandType recved_type = static_cast<CommandType>(recved.head);
switch (recved_type) {
case CommandType::kStopServer:
exec_.Stop();
break;
case CommandType::kSyncMode:
sync_mode_ = true;
break;
case CommandType::kSetGradientCompression:
gradient_compression_->DecodeParams(recved.body);
break;
case CommandType::kSetProfilerParams:
// last char is the type of profiler command
ProcessServerProfilerCommands(static_cast<KVStoreServerProfilerCommand>
(recved.body.back() - '0'),
recved.body);
break;
case CommandType::kSetMultiPrecision:
// uses value 1 for message id from frontend
if (!multi_precision_) {
multi_precision_ = true;
CreateMultiPrecisionCopies();
}
break;
case CommandType::kController:
// this uses value 0 for message id from frontend
// let the main thread execute ctrl, which is necessary for python
exec_.Exec([this, recved]() {
CHECK(controller_);
controller_(recved.head, recved.body);
});
break;
}
app->Response(recved);
}
/*
* For keys that are already initialized, create stored_realt if necessary.
* This is only needed if, through incorrect usage of kvstore, some keys
* were initialized before the optimizer was set.
*/
void CreateMultiPrecisionCopies() {
for (auto const &stored_entry : store_) {
const int key = stored_entry.first;
const NDArray &stored = stored_entry.second;
if (stored.dtype() != mshadow::kFloat32) {
auto &stored_realt = store_realt_[key];
if (stored.storage_type() == kRowSparseStorage) {
stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(),
true, mshadow::kFloat32);
} else {
stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32);
}
auto &update = update_buf_[key];
if (!update.merged.is_none()) {
if (update.merged.storage_type() == kRowSparseStorage) {
update.merged = NDArray(kRowSparseStorage, update.merged.shape(), update.merged.ctx(),
true, mshadow::kFloat32);
} else {
update.merged = NDArray(update.merged.shape(), update.merged.ctx(), false,
mshadow::kFloat32);
}
}
CHECK(update.request.size() == 0)
<< "Rank " << ps::MyRank() << ": multi-precision mode cannot be set while pushes are underway. "
<< "Please set the optimizer before pushing keys. key: " << key << ", pending requests: " << update.request.size();
CopyFromTo(stored, stored_realt);
}
}
for (auto const &stored_realt_entry : store_realt_) {
stored_realt_entry.second.WaitToRead();
}
}
void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) {
switch (type) {
case KVStoreServerProfilerCommand::kSetConfig:
SetProfilerConfig(body.substr(0, body.size() - 1));
break;
case KVStoreServerProfilerCommand::kState:
MXSetProfilerState(static_cast<int>(body.front() - '0'));
break;
case KVStoreServerProfilerCommand::kPause:
MXProfilePause(static_cast<int>(body.front() - '0'));
break;
case KVStoreServerProfilerCommand::kDump:
MXDumpProfile(static_cast<int>(body.front() - '0'));
break;
}
}
void SetProfilerConfig(std::string params_str) {
std::vector<std::string> elems;
mxnet::kvstore::split(params_str, ',', std::back_inserter(elems));
std::vector<const char*> ckeys;
std::vector<const char*> cvals;
ckeys.reserve(elems.size());
cvals.reserve(elems.size());
for (size_t i=0; i < elems.size(); i++) {
std::vector<std::string> parts;
mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts));
CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker";
CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty";
CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "<< parts[0];
if (parts[0] == "filename") {
parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1];
}
char* ckey = new char[parts[0].length() + 1];
std::snprintf(ckey, parts[0].length() + 1, "%s", parts[0].c_str());
ckeys.push_back(ckey);
char* cval = new char[parts[1].length() + 1];
std::snprintf(cval, parts[1].length() + 1, "%s", parts[1].c_str());
cvals.push_back(cval);
}
MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]);
for (size_t i=0; i < ckeys.size(); i++) {
delete[] ckeys[i];
delete[] cvals[i];
}
}
void DataHandleEx(const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
DataHandleType type = DepairDataHandleType(req_meta.cmd);
switch (type.requestType) {
case RequestType::kRowSparsePushPull:
DataHandleRowSparse(type, req_meta, req_data, server);
break;
case RequestType::kCompressedPushPull:
DataHandleCompressed(type, req_meta, req_data, server);
break;
case RequestType::kDefaultPushPull:
DataHandleDefault(type, req_meta, req_data, server);
break;
}
}
inline bool has_multi_precision_copy(const DataHandleType type) {
return multi_precision_ && type.dtype != mshadow::kFloat32;
}
inline void ApplyUpdates(const DataHandleType type, const int key,
UpdateBuf *update_buf, ps::KVServer<char>* server) {
if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) {
// let the main thread execute updater_, which is necessary for python
auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array;
if (updater_) {
exec_.Exec([this, key, &update, &stored](){
CHECK(updater_);
updater_(key, update, &stored);
});
} else {
CHECK(sync_mode_) << "Updater needs to be set for async mode";
// if no updater, just copy
CopyFromTo(update_buf->merged, &stored);
}
if (log_verbose_) {
LOG(INFO) << "sent response to " << update_buf->request.size() << " workers";
}
for (const auto& req : update_buf->request) {
server->Response(req);
}
update_buf->request.clear();
if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]);
stored.WaitToRead();
} else {
update_buf->merged.WaitToRead();
}
}
void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
const int64_t master_key, const int64_t num_rows) {
indices[0] = 0;
for (int64_t i = 1; i <= num_rows; i++) {
int key = DecodeKey(keys[i]);
auto row_id = key - master_key;
indices[i - 1] = row_id;
}
}
void AccumulateRowSparseGrads(const DataHandleType type,
const NDArray& recved,
UpdateBuf* updateBuf) {
NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array);
const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array : recved;
// accumulate row_sparse gradients
using namespace mshadow;
Engine::Get()->PushAsync(
[to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) {
op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>(
{}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out});
on_complete();
}, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
CopyFromTo(out, &(updateBuf->merged), 0);
updateBuf->merged.WaitToRead();
}
void RowSparsePullResponse(const DataHandleType type,
const int master_key,
const size_t num_rows,
const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
if (log_verbose_) LOG(INFO) << "pull: " << master_key;
ps::KVPairs<char> response;
if (num_rows == 0) {
std::vector<int> lens(req_data.keys.size(), 0);
response.keys = req_data.keys;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
return;
}
const NDArray& stored = store_[master_key];
if (has_multi_precision_copy(type)) stored.WaitToRead();
CHECK(!stored.is_none()) << "init " << master_key << " first";
auto shape = stored.shape();
auto unit_len = shape.ProdShape(1, shape.ndim());
const int num_bytes = mshadow::mshadow_sizeof(type.dtype);
const int unit_size = unit_len * num_bytes;
const char* data = static_cast<char *> (stored.data().dptr_);
auto len = num_rows * unit_size;
// concat values
response.vals.resize(len);
#pragma omp parallel for
for (size_t i = 1; i <= num_rows; i++) {
int key = DecodeKey(req_data.keys[i]);
int64_t row_id = key - master_key;
const auto src = data + row_id * unit_size;
auto begin = (i - 1) * unit_size;
auto end = i * unit_size;
response.vals.segment(begin, end).CopyFrom(src, unit_size);
}
// setup response
response.keys = req_data.keys;
std::vector<int> lens(req_data.keys.size(), unit_len);
lens[0] = 0;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
}
void InitRowSparseStored(const DataHandleType type,
const int master_key,
const size_t num_rows,
const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key];
int dtype = type.dtype;
int num_bytes = mshadow::mshadow_sizeof(dtype);
auto unit_len = req_data.lens[1] / num_bytes;
CHECK_GT(unit_len, 0);
size_t ds[] = {num_rows, (size_t) unit_len};
mxnet::TShape dshape(ds, ds + 2);
CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes);
TBlob recv_blob;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
})
NDArray recved = NDArray(recv_blob, 0);
stored = NDArray(kRowSparseStorage, dshape, Context(), true,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
if (has_multi_precision_copy(type)) {
store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype);
}
Engine::Get()->PushAsync(
[this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) {
NDArray rsp = stored;
stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
using namespace mxnet::op;
nnvm::dim_t nnr = rsp.shape()[0];
MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, {
IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>();
mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx);
});
TBlob rsp_data = rsp.data();
// copies or casts as appropriate
ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext());
on_complete();
}, recved.ctx(), {recved.var()}, {stored.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
if (has_multi_precision_copy(type)) {
CopyFromTo(stored, store_[master_key]);
store_[master_key].WaitToRead();
}
stored.WaitToRead();
server->Response(req_meta);
}
void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
int master_key = DecodeKey(req_data.keys[0]);
auto num_rows = req_data.keys.size() - 1;
auto& stored = store_[master_key];
if (req_meta.push) {
CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
CHECK_EQ(req_data.lens[0], 0);
if (stored.is_none()) {
if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
// initialization
CHECK_GT(num_rows, 0) << "init with empty data is not supported";
InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server);
return;
} else {
if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys;
auto& updates = update_buf_[master_key];
if (sync_mode_ && updates.merged.is_none()) {
updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
}
if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false,
mshadow::kFloat32);
}
if (num_rows == 0) {
if (sync_mode_) {
if (updates.request.empty()) {
// reset to zeros
int merged_dtype = has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype;
updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(),
true, merged_dtype);
} // else nothing to aggregate
updates.request.push_back(req_meta);
ApplyUpdates(type, master_key, &updates, server);
} else {
server->Response(req_meta);
}
} else {
auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype);
CHECK_GT(unit_len, 0);
// indices
std::vector<int64_t> indices(num_rows);
DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
// data
TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
mxnet::TShape dshape(ds, ds + 2);
TBlob recv_blob;
MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()),
dshape, cpu::kDevMask);
})
// row_sparse NDArray
NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
if (updates.request.empty()) {
if (sync_mode_) {
CopyFromTo(recved, updates.merged);
} else {
if (has_multi_precision_copy(type)) {
CopyFromTo(recved, updates.temp_array);
} else {
updates.temp_array = recved;
}
}
} else {
CHECK(sync_mode_);
AccumulateRowSparseGrads(type, recved, &updates);
}
updates.request.push_back(req_meta);
ApplyUpdates(type, master_key, &updates, server);
}
}
} else {
// pull
RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server);
}
}
void DefaultStorageResponse(const DataHandleType type,
const int key,
const ps::KVMeta& req_meta,
const ps::KVPairs<char> &req_data,
ps::KVServer<char>* server) {
ps::KVPairs<char> response;
const NDArray& stored = store_[key];
CHECK(!stored.is_none()) << "init " << key << " first";
// as server returns when store_realt is ready in this case
if (has_multi_precision_copy(type)) stored.WaitToRead();
auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype());
response.keys = req_data.keys;
response.lens = {len};
// TODO(mli) try to remove this CopyFrom
response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len);
server->Response(req_meta, response);
}
void DataHandleCompressed(const DataHandleType type,
const ps::KVMeta& req_meta,
const ps::KVPairs<char> &req_data,
ps::KVServer<char>* server) {
CHECK_EQ(type.dtype, mshadow::kFloat32)
<< "Gradient compression is currently supported for fp32 only";
if (req_meta.push) {
// Several WaitToRead calls are used below because \a recved's memory could
// be deallocated when this function returns, so we must make sure the
// operators working on that \a NDArray have actually finished.
// The first key is a dummy key representing the original array size; its len is 0.
CHECK_EQ(req_data.keys.size(), (size_t)2);
CHECK_EQ(req_data.lens.size(), (size_t)2);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);
int original_size = DecodeKey(req_data.keys[0]);
int key = DecodeKey(req_data.keys[1]);
auto& stored = store_[key];
size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)};
mxnet::TShape dshape(ds, ds + 1);
TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask);
NDArray recved = NDArray(recv_blob, 0);
NDArray decomp_buf = decomp_buf_[key];
dshape = mxnet::TShape{(int64_t) original_size};
if (decomp_buf.is_none()) {
decomp_buf = NDArray(dshape, Context());
}
if (stored.is_none()) {
stored = NDArray(dshape, Context());
gradient_compression_->Dequantize(recved, &stored, 0);
server->Response(req_meta);
stored.WaitToRead();
} else if (sync_mode_) {
// synced push
auto& merged = update_buf_[key];
if (merged.merged.is_none()) {
merged.merged = NDArray(dshape, Context());
}
if (merged.request.size() == 0) {
gradient_compression_->Dequantize(recved, &merged.merged, 0);
} else {
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
merged.merged += decomp_buf;
}
merged.request.push_back(req_meta);
ApplyUpdates(type, key, &merged, server);
} else {
// async push
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
exec_.Exec([this, key, &decomp_buf, &stored]() {
CHECK(updater_);
updater_(key, decomp_buf, &stored);
});
server->Response(req_meta);
stored.WaitToRead();
}
} else { // pull
CHECK_EQ(req_data.keys.size(), (size_t)1);
CHECK_EQ(req_data.lens.size(), (size_t)0);
int key = DecodeKey(req_data.keys[0]);
DefaultStorageResponse(type, key, req_meta, req_data, server);
}
}
void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta,
const ps::KVPairs<char> &req_data,
ps::KVServer<char>* server) {
// do some check
CHECK_EQ(req_data.keys.size(), (size_t)1);
if (req_meta.push) {
CHECK_EQ(req_data.lens.size(), (size_t)1);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
}
int key = DecodeKey(req_data.keys[0]);
auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
// Several WaitToRead calls are used below because \a recved's memory could
// be deallocated when this function returns, so we must make sure the
// operators working on that \a NDArray have actually finished.
if (req_meta.push) {
size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)};
mxnet::TShape dshape(ds, ds + 1);
TBlob recv_blob;
MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
})
NDArray recved = NDArray(recv_blob, 0);
if (stored.is_none()) {
// initialization
stored = NDArray(dshape, Context(), false,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
CopyFromTo(recved, &stored, 0);
server->Response(req_meta);
if (has_multi_precision_copy(type)) {
auto& stored_dtype = store_[key];
stored_dtype = NDArray(dshape, Context(), false, type.dtype);
CopyFromTo(stored, stored_dtype);
stored_dtype.WaitToRead();
}
stored.WaitToRead();
} else {
auto &updates = update_buf_[key];
if (sync_mode_ && updates.merged.is_none()) {
updates.merged = NDArray(dshape, Context(), false,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
}
if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32);
}
if (updates.request.empty()) {
if (sync_mode_) {
CopyFromTo(recved, updates.merged);
} else {
if (has_multi_precision_copy(type)) {
CopyFromTo(recved, updates.temp_array);
} else {
updates.temp_array = recved;
}
}
} else {
CHECK(sync_mode_);
if (has_multi_precision_copy(type)) {
CopyFromTo(recved, updates.temp_array);
updates.merged += updates.temp_array;
} else {
updates.merged += recved;
}
}
updates.request.push_back(req_meta);
ApplyUpdates(type, key, &updates, server);
}
} else {
DefaultStorageResponse(type, key, req_meta, req_data, server);
}
}
int DecodeKey(ps::Key key) {
auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()];
return key - kr.begin();
}
/**
* \brief user defined mode for push
*/
bool sync_mode_;
KVStore::Controller controller_;
KVStore::Updater updater_;
/**
* \brief store_ contains the value at kvstore for each key
*/
std::unordered_map<int, NDArray> store_;
std::unordered_map<int, NDArray> store_realt_;
/**
* \brief update_buf_ is a buffer used if sync_mode is true. It holds the
* values from different workers as they are merged. The store is updated
* with this value once values from all workers have been pushed into it.
*/
std::unordered_map<int, UpdateBuf> update_buf_;
/**
* \brief decomp_buf_ is a buffer into which compressed values are
* decompressed before merging into the store. Used when gradient compression is enabled.
*/
std::unordered_map<int, NDArray> decomp_buf_;
Executor exec_;
ps::KVServer<char>* ps_server_;
// whether to LOG verbose information
bool log_verbose_;
/*
* \brief whether to use multi precision mode.
* in multi precision mode, all weights are stored as float32.
* any gradient received will be cast to float32 before accumulation and updating of weights.
*/
bool multi_precision_;
/**
* \brief gradient compression object.
* starts with none, used after SetGradientCompression sets the type
* currently there is no support for unsetting gradient compression
*/
std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
|
DRB085-threadprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable is used within a function called from a parallel region.
threadprivate (together with copyin) avoids the data race.
*/
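/*
Build/run sketch (hedged; the OpenMP flag depends on the compiler):
  gcc -fopenmp DRB085-threadprivate-orig-no.c -o drb085 && ./drb085
Each thread gets its own copy of sum0; copyin initializes those copies
from the master thread's value on entry to the parallel region.
*/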
#include <stdio.h>
#include <assert.h>
int sum0=0, sum1=0;
#pragma omp threadprivate(sum0)
void foo (int i)
{
sum0=sum0+i;
}
int main()
{
int len=1000;
int i, sum=0;
#pragma omp parallel copyin(sum0)
{
#pragma omp for schedule(dynamic)
for (i=0;i<len;i++)
{
foo (i);
}
#pragma omp critical
{
sum= sum+sum0;
}
}
/* reference calculation */
for (i=0;i<len;i++)
{
sum1=sum1+i;
}
printf("sum=%d; sum1=%d\n",sum,sum1);
assert(sum==sum1);
return 0;
}
|
drsdd.c | /*! @copyright (c) 2017 King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
*
* STARS-H is a software package, provided by King Abdullah
* University of Science and Technology (KAUST)
*
* @file src/backends/mpi/blrm/drsdd.c
* @version 1.3.0
* @author Aleksandr Mikhalev
* @date 2017-11-07
* */
#include "common.h"
#include "starsh.h"
#include "starsh-mpi.h"
int cmp_size_t(const void *a, const void *b)
//! Compare two size_t integers
{
size_t _a = *(size_t *)a;
size_t _b = *(size_t *)b;
if(_a > _b) return 1;
if(_a == _b) return 0;
return -1;
}
int starsh_blrm__drsdd_mpi(STARSH_blrm **matrix, STARSH_blrf *format,
int maxrank, double tol, int onfly)
//! Approximate each tile by randomized SVD.
/*!
* @param[out] matrix: Address of pointer to @ref STARSH_blrm object.
* @param[in] format: Block low-rank format.
* @param[in] maxrank: Maximum possible rank.
* @param[in] tol: Relative error tolerance.
* @param[in] onfly: If non-zero, do not store dense (near-field) blocks.
* @return Error code @ref STARSH_ERRNO.
* @ingroup blrm
* */
{
STARSH_blrf *F = format;
STARSH_problem *P = F->problem;
STARSH_kernel *kernel = P->kernel;
STARSH_int nblocks_far = F->nblocks_far;
STARSH_int nblocks_near = F->nblocks_near;
STARSH_int nblocks_far_local = F->nblocks_far_local;
STARSH_int nblocks_near_local = F->nblocks_near_local;
// Shortcuts to information about clusters
STARSH_cluster *RC = F->row_cluster;
STARSH_cluster *CC = F->col_cluster;
void *RD = RC->data, *CD = CC->data;
// The following values default to those of the given block low-rank format F,
// but they are changed when there are false far-field blocks.
STARSH_int new_nblocks_far = F->nblocks_far;
STARSH_int new_nblocks_near = F->nblocks_near;
STARSH_int new_nblocks_far_local = F->nblocks_far_local;
STARSH_int new_nblocks_near_local = F->nblocks_near_local;
STARSH_int *block_far = F->block_far;
STARSH_int *block_near = F->block_near;
STARSH_int *block_far_local = F->block_far_local;
STARSH_int *block_near_local = F->block_near_local;
// Places to store low-rank factors, dense blocks and ranks
Array **far_U = NULL, **far_V = NULL, **near_D = NULL;
int *far_rank = NULL;
double *alloc_U = NULL, *alloc_V = NULL, *alloc_D = NULL;
size_t offset_U = 0, offset_V = 0, offset_D = 0;
STARSH_int lbi, lbj, bi, bj = 0;
double drsdd_time = 0, kernel_time = 0;
const int oversample = starsh_params.oversample;
// Init buffers to store low-rank factors of far-field blocks if needed
if(nblocks_far > 0)
{
STARSH_MALLOC(far_U, nblocks_far_local);
STARSH_MALLOC(far_V, nblocks_far_local);
STARSH_MALLOC(far_rank, nblocks_far_local);
size_t size_U = 0, size_V = 0;
// Simple cycle over all far-field blocks
for(lbi = 0; lbi < nblocks_far_local; lbi++)
{
STARSH_int bi = block_far_local[lbi];
// Get indexes of corresponding block row and block column
STARSH_int i = block_far[2*bi];
STARSH_int j = block_far[2*bi+1];
// Accumulate sizes of the low-rank factors U and V
size_U += RC->size[i];
size_V += CC->size[j];
}
size_U *= maxrank;
size_V *= maxrank;
STARSH_MALLOC(alloc_U, size_U);
STARSH_MALLOC(alloc_V, size_V);
for(lbi = 0; lbi < nblocks_far_local; lbi++)
{
STARSH_int bi = block_far_local[lbi];
// Get indexes of corresponding block row and block column
STARSH_int i = block_far[2*bi];
STARSH_int j = block_far[2*bi+1];
// Get corresponding sizes
size_t nrows = RC->size[i], ncols = CC->size[j];
int shape_U[] = {nrows, maxrank};
int shape_V[] = {ncols, maxrank};
double *U = alloc_U+offset_U, *V = alloc_V+offset_V;
offset_U += nrows*maxrank;
offset_V += ncols*maxrank;
array_from_buffer(far_U+lbi, 2, shape_U, 'd', 'F', U);
array_from_buffer(far_V+lbi, 2, shape_V, 'd', 'F', V);
}
offset_U = 0;
offset_V = 0;
}
// Work variables
int info;
// Simple cycle over all far-field admissible blocks
#pragma omp parallel for schedule(dynamic, 1)
for(lbi = 0; lbi < nblocks_far_local; lbi++)
{
STARSH_int bi = block_far_local[lbi];
// Get indexes of corresponding block row and block column
STARSH_int i = block_far[2*bi];
STARSH_int j = block_far[2*bi+1];
// Get corresponding sizes and minimum of them
int nrows = RC->size[i];
int ncols = CC->size[j];
int mn = nrows < ncols ? nrows : ncols;
int mn2 = maxrank+oversample;
if(mn2 > mn)
mn2 = mn;
// Get size of temporary arrays
int lwork = ncols, lwork_sdd = (4*mn2+7)*mn2;
if(lwork_sdd > lwork)
lwork = lwork_sdd;
lwork += (size_t)mn2*(2*ncols+nrows+mn2+1);
int liwork = 8*mn2;
double *D, *work;
int *iwork;
int info;
// Allocate temporary arrays
STARSH_PMALLOC(D, (size_t)nrows*(size_t)ncols, info);
STARSH_PMALLOC(iwork, liwork, info);
STARSH_PMALLOC(work, lwork, info);
// Compute elements of a block
#ifdef OPENMP
double time0 = omp_get_wtime();
#endif
kernel(nrows, ncols, RC->pivot+RC->start[i], CC->pivot+CC->start[j],
RD, CD, D, nrows);
#ifdef OPENMP
double time1 = omp_get_wtime();
#endif
starsh_dense_dlrrsdd(nrows, ncols, D, nrows, far_U[lbi]->data, nrows,
far_V[lbi]->data, ncols, far_rank+lbi, maxrank, oversample,
tol, work, lwork, iwork);
#ifdef OPENMP
double time2 = omp_get_wtime();
#pragma omp critical
{
drsdd_time += time2-time1;
kernel_time += time1-time0;
}
#endif
// Free temporary arrays
free(D);
free(work);
free(iwork);
}
// Get number of false far-field blocks
STARSH_int nblocks_false_far_local = 0;
STARSH_int *false_far_local = NULL;
for(lbi = 0; lbi < nblocks_far_local; lbi++)
if(far_rank[lbi] == -1)
nblocks_false_far_local++;
if(nblocks_false_far_local > 0)
{
// IMPORTANT: `false_far` and `false_far_local` must be in
// ascending order for later code to work normally
STARSH_MALLOC(false_far_local, nblocks_false_far_local);
lbj = 0;
for(lbi = 0; lbi < nblocks_far_local; lbi++)
if(far_rank[lbi] == -1)
false_far_local[lbj++] = block_far_local[lbi];
}
// Sync list of all false far-field blocks
STARSH_int nblocks_false_far = 0;
int int_nblocks_false_far_local = nblocks_false_far_local;
int *mpi_recvcount, *mpi_offset;
int mpi_size, mpi_rank;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
STARSH_MALLOC(mpi_recvcount, mpi_size);
STARSH_MALLOC(mpi_offset, mpi_size);
MPI_Allgather(&int_nblocks_false_far_local, 1, MPI_INT, mpi_recvcount,
1, MPI_INT, MPI_COMM_WORLD);
for(bi = 0; bi < mpi_size; bi++)
nblocks_false_far += mpi_recvcount[bi];
mpi_offset[0] = 0;
for(bi = 1; bi < mpi_size; bi++)
mpi_offset[bi] = mpi_offset[bi-1]+mpi_recvcount[bi-1];
STARSH_int *false_far = NULL;
if(nblocks_false_far > 0)
STARSH_MALLOC(false_far, nblocks_false_far);
MPI_Allgatherv(false_far_local, nblocks_false_far_local, my_MPI_SIZE_T,
false_far, mpi_recvcount, mpi_offset, my_MPI_SIZE_T,
MPI_COMM_WORLD);
free(mpi_recvcount);
free(mpi_offset);
// Make false_far be in ascending order
qsort(false_far, nblocks_false_far, sizeof(*false_far), cmp_size_t);
if(nblocks_false_far > 0)
{
// Update list of near-field blocks
new_nblocks_near = nblocks_near+nblocks_false_far;
new_nblocks_near_local = nblocks_near_local+nblocks_false_far_local;
STARSH_MALLOC(block_near, 2*new_nblocks_near);
if(new_nblocks_near_local > 0)
STARSH_MALLOC(block_near_local, new_nblocks_near_local);
// At first get all near-field blocks, assumed to be dense
#pragma omp parallel for schedule(static)
for(bi = 0; bi < 2*nblocks_near; bi++)
block_near[bi] = F->block_near[bi];
#pragma omp parallel for schedule(static)
for(lbi = 0; lbi < nblocks_near_local; lbi++)
block_near_local[lbi] = F->block_near_local[lbi];
// Add false far-field blocks
#pragma omp parallel for schedule(static)
for(bi = 0; bi < nblocks_false_far; bi++)
{
STARSH_int bj = false_far[bi];
block_near[2*(bi+nblocks_near)] = F->block_far[2*bj];
block_near[2*(bi+nblocks_near)+1] = F->block_far[2*bj+1];
}
bi = 0;
for(lbi = 0; lbi < nblocks_false_far_local; lbi++)
{
lbj = false_far_local[lbi];
while(bi < nblocks_false_far && false_far[bi] < lbj)
bi++;
block_near_local[nblocks_near_local+lbi] = nblocks_near+bi;
}
// Update list of far-field blocks
new_nblocks_far = nblocks_far-nblocks_false_far;
new_nblocks_far_local = nblocks_far_local-nblocks_false_far_local;
if(new_nblocks_far > 0)
{
STARSH_MALLOC(block_far, 2*new_nblocks_far);
if(new_nblocks_far_local > 0)
STARSH_MALLOC(block_far_local, new_nblocks_far_local);
bj = 0;
lbi = 0;
lbj = 0;
for(bi = 0; bi < nblocks_far; bi++)
{
// `false_far` must be in ascending order for this to work
if(bj < nblocks_false_far && false_far[bj] == bi)
{
if(nblocks_false_far_local > lbj &&
false_far_local[lbj] == bi)
{
lbi++;
lbj++;
}
bj++;
}
else
{
block_far[2*(bi-bj)] = F->block_far[2*bi];
block_far[2*(bi-bj)+1] = F->block_far[2*bi+1];
if(nblocks_far_local > lbi &&
F->block_far_local[lbi] == bi)
{
block_far_local[lbi-lbj] = bi-bj;
lbi++;
}
}
}
}
// Update format by creating new format
STARSH_blrf *F2;
info = starsh_blrf_new_from_coo_mpi(&F2, P, F->symm, RC, CC,
new_nblocks_far, block_far, new_nblocks_far_local,
block_far_local, new_nblocks_near, block_near,
new_nblocks_near_local, block_near_local, F->type);
// Swap internal data of formats and free unnecessary data
STARSH_blrf tmp_blrf = *F;
*F = *F2;
*F2 = tmp_blrf;
if(mpi_rank == 0)
STARSH_WARNING("`F` was modified due to false far-field blocks");
starsh_blrf_free(F2);
}
// Compute near-field blocks if needed
if(onfly == 0 && new_nblocks_near > 0)
{
STARSH_MALLOC(near_D, new_nblocks_near_local);
size_t size_D = 0;
// Simple cycle over all near-field blocks
for(lbi = 0; lbi < new_nblocks_near_local; lbi++)
{
STARSH_int bi = block_near_local[lbi];
// Get indexes of corresponding block row and block column
STARSH_int i = block_near[2*bi];
STARSH_int j = block_near[2*bi+1];
// Get corresponding sizes
size_t nrows = RC->size[i];
size_t ncols = CC->size[j];
// Update size_D
size_D += nrows*ncols;
}
STARSH_MALLOC(alloc_D, size_D);
// For each near-field block compute its elements
#pragma omp parallel for schedule(dynamic, 1)
for(lbi = 0; lbi < new_nblocks_near_local; lbi++)
{
STARSH_int bi = block_near_local[lbi];
// Get indexes of corresponding block row and block column
STARSH_int i = block_near[2*bi];
STARSH_int j = block_near[2*bi+1];
// Get corresponding sizes
int nrows = RC->size[i];
int ncols = CC->size[j];
int shape[2] = {nrows, ncols};
double *D;
#pragma omp critical
{
D = alloc_D+offset_D;
offset_D += nrows*ncols;
//array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);
//offset_D += near_D[lbi]->size;
}
array_from_buffer(near_D+lbi, 2, shape, 'd', 'F', D);
#ifdef OPENMP
double time0 = omp_get_wtime();
#endif
kernel(nrows, ncols, RC->pivot+RC->start[i],
CC->pivot+CC->start[j], RD, CD, D, nrows);
#ifdef OPENMP
double time1 = omp_get_wtime();
#pragma omp critical
kernel_time += time1-time0;
#endif
}
}
// Change sizes of far_rank, far_U and far_V if there were false
// far-field blocks
lbj = 0;
for(lbi = 0; lbi < nblocks_far_local; lbi++)
{
if(far_rank[lbi] == -1)
lbj++;
else
{
int shape_U[2] = {far_U[lbi]->shape[0], far_rank[lbi]};
int shape_V[2] = {far_V[lbi]->shape[0], far_rank[lbi]};
array_from_buffer(far_U+lbi-lbj, 2, shape_U, 'd', 'F',
far_U[lbi]->data);
array_from_buffer(far_V+lbi-lbj, 2, shape_V, 'd', 'F',
far_V[lbi]->data);
far_rank[lbi-lbj] = far_rank[lbi];
}
}
if(nblocks_false_far_local > 0 && new_nblocks_far_local > 0)
{
STARSH_REALLOC(far_rank, new_nblocks_far_local);
STARSH_REALLOC(far_U, new_nblocks_far_local);
STARSH_REALLOC(far_V, new_nblocks_far_local);
}
// If all far-field blocks are false, then dealloc buffers
if(new_nblocks_far_local == 0 && nblocks_far_local > 0)
{
block_far = NULL;
free(far_rank);
far_rank = NULL;
free(far_U);
far_U = NULL;
free(far_V);
far_V = NULL;
free(alloc_U);
alloc_U = NULL;
free(alloc_V);
alloc_V = NULL;
}
// Dealloc list of false far-field blocks if it is not empty
if(nblocks_false_far > 0)
free(false_far);
if(nblocks_false_far_local > 0)
free(false_far_local);
// Finish with creating instance of Block Low-Rank Matrix with given
// buffers
#ifdef OPENMP
double mpi_drsdd_time = 0, mpi_kernel_time = 0;
MPI_Reduce(&drsdd_time, &mpi_drsdd_time, 1, MPI_DOUBLE, MPI_SUM, 0,
MPI_COMM_WORLD);
MPI_Reduce(&kernel_time, &mpi_kernel_time, 1, MPI_DOUBLE, MPI_SUM, 0,
MPI_COMM_WORLD);
if(mpi_rank == 0)
{
//STARSH_WARNING("DRSDD kernel total time: %e secs", mpi_drsdd_time);
//STARSH_WARNING("MATRIX kernel total time: %e secs", mpi_kernel_time);
}
#endif
return starsh_blrm_new_mpi(matrix, F, far_rank, far_U, far_V, onfly,
near_D, alloc_U, alloc_V, alloc_D, '1');
}
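/*
 * Usage sketch (hedged): a typical call, assuming the STARSH_blrf format F
 * was built beforehand with the STARS-H MPI helpers; maxrank=30 and
 * tol=1e-9 are illustrative values only.
 *
 *     STARSH_blrm *M;
 *     int info = starsh_blrm__drsdd_mpi(&M, F, 30, 1e-9, 0);
 *     if(info != STARSH_SUCCESS)
 *         return info;
 */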
|
GB_binop__second_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__second_int32
// A.*B function (eWiseMult): GB_AemultB__second_int32
// A*D function (colscale): GB_AxD__second_int32
// D*A function (rowscale): GB_DxB__second_int32
// C+=B function (dense accum): GB_Cdense_accumB__second_int32
// C+=b function (dense accum): GB_Cdense_accumb__second_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_int32
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar GB_bind2nd__second_int32
// C=A'+scalar GB_bind2nd_tran__second_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = bij
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = y ;
// op is second
#define GB_OP_IS_SECOND \
1
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_INT32 || GxB_NO_SECOND_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__second_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__second_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__second_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__second_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__second_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__second_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__second_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__second_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB_bind2nd_tran__second_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ \
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_SCHEME )
#define KRATOS_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "includes/model_part.h"
#include "utilities/openmp_utils.h"
#include "includes/kratos_parameters.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class Scheme
* @ingroup KratosCore
* @brief This class provides the implementation of the basic tasks that are needed by the solution strategy.
* @details It is intended to be the place for tailoring the solution strategies to problem-specific tasks.
* @tparam TSparseSpace The sparse space considered
* @tparam TDenseSpace The dense space considered
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace //= DenseSpace<double>
>
class Scheme
{
public:
///@name Type Definitions
///@{
/// Pointer definition of Scheme
KRATOS_CLASS_POINTER_DEFINITION(Scheme);
/// The definition of the current class
typedef Scheme< TSparseSpace, TDenseSpace > ClassType;
/// Data type definition
typedef typename TSparseSpace::DataType TDataType;
/// Matrix type definition
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
/// Vector type definition
typedef typename TSparseSpace::VectorType TSystemVectorType;
/// Local system matrix type definition
typedef typename TDenseSpace::MatrixType LocalSystemMatrixType;
/// Local system vector type definition
typedef typename TDenseSpace::VectorType LocalSystemVectorType;
/// DoF type definition
typedef Dof<double> TDofType;
/// DoF array type definition
typedef ModelPart::DofsArrayType DofsArrayType;
/// DoF iterator type definition
typedef typename PointerVectorSet<TDofType, IndexedObject>::iterator DofIterator;
/// DoF constant iterator type definition
typedef typename PointerVectorSet<TDofType, IndexedObject>::const_iterator DofConstantIterator;
/// Elements containers definition
typedef ModelPart::ElementsContainerType ElementsArrayType;
/// Conditions containers definition
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default Constructor
* @details Initializes the flags
*/
explicit Scheme()
{
mSchemeIsInitialized = false;
mElementsAreInitialized = false;
mConditionsAreInitialized = false;
}
/**
* @brief Constructor with Parameters
*/
explicit Scheme(Parameters ThisParameters)
{
// Validate default parameters
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
mSchemeIsInitialized = false;
mElementsAreInitialized = false;
mConditionsAreInitialized = false;
}
/** Copy Constructor.
*/
explicit Scheme(Scheme& rOther)
:mSchemeIsInitialized(rOther.mSchemeIsInitialized)
,mElementsAreInitialized(rOther.mElementsAreInitialized)
,mConditionsAreInitialized(rOther.mConditionsAreInitialized)
{
}
/** Destructor.
*/
virtual ~Scheme()
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Create method
* @param ThisParameters The configuration parameters
*/
virtual typename ClassType::Pointer Create(Parameters ThisParameters) const
{
return Kratos::make_shared<ClassType>(ThisParameters);
}
/**
* @brief Clone method
* @return The pointer of the cloned scheme
*/
virtual Pointer Clone()
{
return Kratos::make_shared<Scheme>(*this) ;
}
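/*
 * Derivation sketch (an illustration, not part of the Kratos sources):
 * concrete schemes inherit from Scheme and override the virtual hooks, e.g.
 *
 *   template<class TSparseSpace, class TDenseSpace>
 *   class MyScheme : public Scheme<TSparseSpace, TDenseSpace>
 *   {
 *   public:
 *       void Initialize(ModelPart& rModelPart) override
 *       {
 *           // problem-specific setup ...
 *           this->SetSchemeIsInitialized();
 *       }
 *   };
 */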
/**
* @brief This is the place to initialize the Scheme.
* @details This is intended to be called just once when the strategy is initialized
* @param rModelPart The model part of the problem to solve
*/
virtual void Initialize(ModelPart& rModelPart)
{
KRATOS_TRY
mSchemeIsInitialized = true;
KRATOS_CATCH("")
}
/**
* @brief This method returns if the scheme is initialized
* @return True if initialized, false otherwise
*/
bool SchemeIsInitialized()
{
return mSchemeIsInitialized;
}
/**
* @brief This method sets if the scheme has been initialized or not (true by default)
* @param SchemeIsInitializedFlag If the flag must be set to true or false
*/
void SetSchemeIsInitialized(bool SchemeIsInitializedFlag = true)
{
mSchemeIsInitialized = SchemeIsInitializedFlag;
}
/**
* @brief This method returns if the elements are initialized
* @return True if initialized, false otherwise
*/
bool ElementsAreInitialized()
{
return mElementsAreInitialized;
}
/**
* @brief This method sets if the elements have been initialized or not (true by default)
* @param ElementsAreInitializedFlag If the flag must be set to true or false
*/
void SetElementsAreInitialized(bool ElementsAreInitializedFlag = true)
{
mElementsAreInitialized = ElementsAreInitializedFlag;
}
/**
* @brief This method returns if the conditions are initialized
* @return True if initialized, false otherwise
*/
bool ConditionsAreInitialized()
{
return mConditionsAreInitialized;
}
/**
* @brief This method sets if the conditions have been initialized or not (true by default)
* @param ConditionsAreInitializedFlag If the flag must be set to true or false
*/
void SetConditionsAreInitialized(bool ConditionsAreInitializedFlag = true)
{
mConditionsAreInitialized = ConditionsAreInitializedFlag;
}
/**
* @brief This is the place to initialize the elements.
* @details This is intended to be called just once when the strategy is initialized
* @param rModelPart The model part of the problem to solve
*/
virtual void InitializeElements( ModelPart& rModelPart)
{
KRATOS_TRY
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); i++) {
auto it_elem = rModelPart.ElementsBegin() + i;
it_elem->Initialize(r_current_process_info);
}
SetElementsAreInitialized();
KRATOS_CATCH("")
}
/**
* @brief This is the place to initialize the conditions.
* @details This is intended to be called just once when the strategy is initialized
* @param rModelPart The model part of the problem to solve
*/
virtual void InitializeConditions(ModelPart& rModelPart)
{
KRATOS_TRY
KRATOS_ERROR_IF_NOT(mElementsAreInitialized) << "Before initializing Conditions, initialize Elements FIRST" << std::endl;
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); i++) {
auto it_cond = rModelPart.ConditionsBegin() + i;
it_cond->Initialize(r_current_process_info);
}
SetConditionsAreInitialized();
KRATOS_CATCH("")
}
/**
* @brief Function called once at the beginning of each solution step.
* @details The basic operations to be carried in there are the following:
* - managing variables to be kept constant over the time step (for example time-Scheme constants depending on the actual time step)
* @param rModelPart The model part of the problem to solve
* @param A LHS matrix
* @param Dx Incremental update of primary variables
* @param b RHS Vector
*/
virtual void InitializeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
)
{
KRATOS_TRY
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Definition of the first element iterator
const auto it_elem_begin = rModelPart.ElementsBegin();
// Initializes solution step for all of the elements
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) {
auto it_elem = it_elem_begin + i;
it_elem->InitializeSolutionStep(r_current_process_info);
}
// Definition of the first condition iterator
const auto it_cond_begin = rModelPart.ConditionsBegin();
// Initializes solution step for all of the conditions
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) {
auto it_cond = it_cond_begin + i;
it_cond->InitializeSolutionStep(r_current_process_info);
}
// Definition of the first constraint iterator
const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin();
// Initializes solution step for all of the constraints
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) {
auto it_const = it_const_begin + i;
it_const->InitializeSolutionStep(r_current_process_info);
}
KRATOS_CATCH("")
}
/**
* @brief Function called once at the end of a solution step, after convergence is reached if an iterative process is needed
* @param rModelPart The model part of the problem to solve
* @param A LHS matrix
* @param Dx Incremental update of primary variables
* @param b RHS Vector
*/
virtual void FinalizeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
KRATOS_TRY
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Definition of the first element iterator
const auto it_elem_begin = rModelPart.ElementsBegin();
// Finalizes solution step for all of the elements
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) {
auto it_elem = it_elem_begin + i;
it_elem->FinalizeSolutionStep(r_current_process_info);
}
// Definition of the first condition iterator
const auto it_cond_begin = rModelPart.ConditionsBegin();
// Finalizes solution step for all of the conditions
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) {
auto it_cond = it_cond_begin + i;
it_cond->FinalizeSolutionStep(r_current_process_info);
}
// Definition of the first constraint iterator
const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin();
// Finalizes solution step for all of the constraints
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) {
auto it_const = it_const_begin + i;
it_const->FinalizeSolutionStep(r_current_process_info);
}
KRATOS_CATCH("")
}
/************************ BEGIN FRACTIONAL STEP METHODS ****************************/
/********************* TODO: DECIDE IF NECESSARY TO DEFINE *************************/
/***********************************************************************************/
// /**
// * @brief Initializes solution step, to be used when system is not explicitly defined
// * @details For example for fractional step strategies
// * @warning Must be defined in derived classes
// * @param rModelPart The model part of the problem to solve
// */
// virtual void InitializeSolutionStep(ModelPart& rModelPart)
// {
// KRATOS_TRY
// KRATOS_CATCH("")
// }
//
// /**
// * @brief Finalizes solution step, to be used when system is not explicitly defined
// * @details For example for fractional step strategies
// * @warning Must be defined in derived classes
// * @param rModelPart The model part of the problem to solve
// */
// virtual void FinalizeSolutionStep(ModelPart& rModelPart)
// {
// KRATOS_TRY
// KRATOS_CATCH("")
// }
//
// /**
// * @brief Executed before each fractional step
// * @warning Must be defined in derived classes
// * @param rModelPart The model part of the problem to solve
// */
// virtual void InitializeFractionalSolutionStep(ModelPart& rModelPart)
// {
// KRATOS_TRY
// KRATOS_CATCH("")
// }
//
// /**
// * @brief Executed after each fractional step
// * @warning Must be defined in derived classes
// * @param rModelPart The model part of the problem to solve
// */
// virtual void FinalizeFractionalSolutionStep(ModelPart& rModelPart)
// {
// KRATOS_TRY
// KRATOS_CATCH("")
// }
/************************ END FRACTIONAL STEP METHODS ****************************/
/***********************************************************************************/
/**
* @brief Function to be called when it is needed to initialize an iteration. It is designed to be called at the beginning of each non-linear iteration
* @note Take care: the elemental function with the same name is NOT called here.
* @warning Must be defined in derived classes
* @details The function is called in the builder for memory efficiency
* @param rModelPart The model part of the problem to solve
* @param A LHS matrix
* @param Dx Incremental update of primary variables
* @param b RHS Vector
*/
virtual void InitializeNonLinIteration(
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
)
{
KRATOS_TRY
KRATOS_CATCH("")
}
/**
* @brief It initializes a non-linear iteration (for an individual element)
* @warning Must be defined in derived classes
* @param rCurrentElement The element to compute
* @param rCurrentProcessInfo The current process info instance
*/
virtual void InitializeNonLinearIteration(
Element::Pointer rCurrentElement,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_TRY
KRATOS_CATCH("")
}
/**
* @brief It initializes a non-linear iteration (for an individual condition)
* @warning Must be defined in derived classes
* @param rCurrentCondition The condition to compute
* @param rCurrentProcessInfo The current process info instance
*/
virtual void InitializeNonLinearIteration(
Condition::Pointer rCurrentCondition,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_TRY
KRATOS_CATCH("")
}
/**
* @brief Function to be called when it is needed to finalize an iteration. It is designed to be called at the end of each non-linear iteration
* @param rModelPart The model part of the problem to solve
* @param A LHS matrix
* @param Dx Incremental update of primary variables
* @param b RHS Vector
*/
virtual void FinalizeNonLinIteration(
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
)
{
KRATOS_TRY
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Definition of the first element iterator
const auto it_elem_begin = rModelPart.ElementsBegin();
// Finalizes non-linear iteration for all of the elements
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) {
auto it_elem = it_elem_begin + i;
it_elem->FinalizeNonLinearIteration(r_current_process_info);
}
// Definition of the first condition iterator
const auto it_cond_begin = rModelPart.ConditionsBegin();
// Finalizes non-linear iteration for all of the conditions
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) {
auto it_cond = it_cond_begin + i;
it_cond->FinalizeNonLinearIteration(r_current_process_info);
}
// Definition of the first constraint iterator
const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin();
// Finalizes non-linear iteration for all of the constraints
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) {
auto it_const = it_const_begin + i;
it_const->FinalizeNonLinearIteration(r_current_process_info);
}
KRATOS_CATCH("")
}
/**
* @brief Performing the prediction of the solution.
* @warning Must be defined in derived classes
* @param rModelPart The model part of the problem to solve
* @param rDofSet Set of all primary variables
* @param A LHS matrix
* @param Dx Incremental update of primary variables
* @param b RHS Vector
*/
virtual void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
)
{
KRATOS_TRY
KRATOS_CATCH("")
}
/**
* @brief Performing the update of the solution.
* @warning Must be defined in derived classes
* @param rModelPart The model part of the problem to solve
* @param rDofSet Set of all primary variables
* @param A LHS matrix
* @param Dx Incremental update of primary variables
* @param b RHS Vector
*/
virtual void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
)
{
KRATOS_TRY
KRATOS_CATCH("")
}
/**
* @brief Functions to be called to prepare the data needed for the output of results.
* @warning Must be defined in derived classes
* @param rModelPart The model part of the problem to solve
* @param rDofSet Set of all primary variables
* @param A LHS matrix
* @param Dx Incremental update of primary variables
* @param b RHS Vector
*/
virtual void CalculateOutputData(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
)
{
KRATOS_TRY
KRATOS_CATCH("")
}
/**
* @brief Function that cleans the results data.
* @warning Must be implemented in the derived classes
*/
virtual void CleanOutputData()
{
KRATOS_TRY
KRATOS_CATCH("")
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed once the solution step is finished
* @warning Must be implemented in the derived classes
*/
virtual void Clean()
{
KRATOS_TRY
KRATOS_CATCH("")
}
/**
* @brief Liberate internal storage.
* @warning Must be implemented in the derived classes
*/
virtual void Clear()
{
KRATOS_TRY
KRATOS_CATCH("")
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided.
* @details Checks can be "expensive" as the function is designed to catch users' errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all OK, 1 otherwise
*/
virtual int Check(const ModelPart& rModelPart) const
{
KRATOS_TRY
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Checks for all of the elements
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.NumberOfElements()); i++) {
auto it_elem = rModelPart.ElementsBegin() + i;
const auto& r_elem = *it_elem;
r_elem.Check(r_current_process_info);
}
// Checks for all of the conditions
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.NumberOfConditions()); i++) {
auto it_cond = rModelPart.ConditionsBegin() + i;
const auto& r_cond = *it_cond;
r_cond.Check(r_current_process_info);
}
// Checks for all of the constraints
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.NumberOfMasterSlaveConstraints()); i++) {
auto it_constraint = rModelPart.MasterSlaveConstraintsBegin() + i;
const auto& r_constraint = *it_constraint;
r_constraint.Check(r_current_process_info);
}
return 0;
KRATOS_CATCH("");
}
virtual int Check(ModelPart& rModelPart)
{
// calling the const version for backward compatibility
const Scheme& r_const_this = *this;
const ModelPart& r_const_model_part = rModelPart;
return r_const_this.Check(r_const_model_part);
}
/**
* @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme.
* @details It "asks" the matrix needed to the element and performs the operations needed to introduce the selected time integration scheme. This function calculates at the same time the contribution to the LHS and to the RHS of the system
* @param rElement The element to compute
* @param LHS_Contribution The LHS matrix contribution
* @param RHS_Contribution The RHS vector contribution
* @param rEquationIdVector The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
virtual void CalculateSystemContributions(
Element& rElement,
LocalSystemMatrixType& LHS_Contribution,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& rEquationIdVector,
const ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
this->CalculateSystemContributions(
Element::Pointer(&rElement),
LHS_Contribution,
RHS_Contribution,
rEquationIdVector,
const_cast<ProcessInfo&>(rCurrentProcessInfo)
); // TODO remove this after the transition period and uncomment the following
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
// rElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
}
KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void CalculateSystemContributions(
Element::Pointer pCurrentElement,
LocalSystemMatrixType& LHS_Contribution,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
pCurrentElement->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
}
/**
* @brief Function totally analogous to the preceding one, but applied to the "condition" objects
* @param rCondition The condition to compute
* @param LHS_Contribution The LHS matrix contribution
* @param RHS_Contribution The RHS vector contribution
* @param rEquationIdVector The ID's of the condition degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
virtual void CalculateSystemContributions(
Condition& rCondition,
LocalSystemMatrixType& LHS_Contribution,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& rEquationIdVector,
const ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
this->Condition_CalculateSystemContributions(
Condition::Pointer(&rCondition),
LHS_Contribution,
RHS_Contribution,
rEquationIdVector,
const_cast<ProcessInfo&>(rCurrentProcessInfo)
); // TODO remove this after the transition period and uncomment the following
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
// rCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
}
KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Condition_CalculateSystemContributions(
Condition::Pointer pCurrentCondition,
LocalSystemMatrixType& LHS_Contribution,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
pCurrentCondition->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
}
/**
* @brief This function is designed to calculate just the RHS contribution
* @param rElement The element to compute
* @param RHS_Contribution The RHS vector contribution
* @param rEquationIdVector The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
virtual void CalculateRHSContribution(
Element& rElement,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& rEquationIdVector,
const ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
this->Calculate_RHS_Contribution(
Element::Pointer(&rElement),
RHS_Contribution,
rEquationIdVector,
const_cast<ProcessInfo&>(rCurrentProcessInfo)
); // TODO remove this after the transition period and uncomment the following
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
// rElement.CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
}
KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Calculate_RHS_Contribution(
Element::Pointer pCurrentElement,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
pCurrentElement->CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
}
/**
* @brief Function totally analogous to the preceding one, but applied to the "condition" objects
* @param rCondition The condition to compute
* @param RHS_Contribution The RHS vector contribution
* @param rEquationIdVector The ID's of the condition degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
virtual void CalculateRHSContribution(
Condition& rCondition,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& rEquationIdVector,
const ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
this->Condition_Calculate_RHS_Contribution(
Condition::Pointer(&rCondition),
RHS_Contribution,
rEquationIdVector,
const_cast<ProcessInfo&>(rCurrentProcessInfo)
); // TODO remove this after the transition period and uncomment the following
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
// rCondition.CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
}
KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Condition_Calculate_RHS_Contribution(
Condition::Pointer pCurrentCondition,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
pCurrentCondition->CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
}
/**
* @brief This function is designed to calculate just the LHS contribution
* @param rElement The element to compute
* @param LHS_Contribution The LHS matrix contribution
* @param rEquationIdVector The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
virtual void CalculateLHSContribution(
Element& rElement,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& rEquationIdVector,
const ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
this->Calculate_LHS_Contribution(
Element::Pointer(&rElement),
LHS_Contribution,
rEquationIdVector,
const_cast<ProcessInfo&>(rCurrentProcessInfo)
); // TODO remove this after the transition period and uncomment the following
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
// rElement.CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
}
KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Calculate_LHS_Contribution(
Element::Pointer pCurrentElement,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
pCurrentElement->CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
}
/**
* @brief Function totally analogous to the preceding one, but applied to the "condition" objects
* @param rCondition The condition to compute
* @param LHS_Contribution The LHS matrix contribution
* @param rEquationIdVector The ID's of the condition degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
virtual void CalculateLHSContribution(
Condition& rCondition,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& rEquationIdVector,
const ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
this->Condition_Calculate_LHS_Contribution(
Condition::Pointer(&rCondition),
LHS_Contribution,
rEquationIdVector,
const_cast<ProcessInfo&>(rCurrentProcessInfo)
); // TODO remove this after the transition period and uncomment the following
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
// rCondition.CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
}
KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Condition_Calculate_LHS_Contribution(
Condition::Pointer pCurrentCondition,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
pCurrentCondition->CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
}
/**
* @brief This method gets the equation id corresponding to the current element
* @param rElement The element to compute
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
virtual void EquationId(
const Element& rElement,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
)
{
rElement.EquationIdVector(rEquationId, rCurrentProcessInfo);
}
KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void EquationId(
Element::Pointer pCurrentElement,
Element::EquationIdVectorType& EquationId,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
(pCurrentElement)->EquationIdVector(EquationId, rCurrentProcessInfo);
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
}
/**
* @brief Function totally analogous to the preceding one, but applied to the "condition" objects
* @param rCondition The condition to compute
* @param rEquationId The ID's of the condition degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
virtual void EquationId(
const Condition& rCondition,
Element::EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
)
{
rCondition.EquationIdVector(rEquationId, rCurrentProcessInfo);
}
KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Condition_EquationId(
Condition::Pointer pCurrentCondition,
Element::EquationIdVectorType& EquationId,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
(pCurrentCondition)->EquationIdVector(EquationId, rCurrentProcessInfo);
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
}
/**
* @brief Function that returns the list of Degrees of freedom to be assembled in the system for a given element
* @param rElement The element to compute
* @param rDofList The list containing the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
virtual void GetDofList(
const Element& rElement,
Element::DofsVectorType& rDofList,
const ProcessInfo& rCurrentProcessInfo
)
{
rElement.GetDofList(rDofList, rCurrentProcessInfo);
}
KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void GetElementalDofList(
Element::Pointer pCurrentElement,
Element::DofsVectorType& ElementalDofList,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
pCurrentElement->GetDofList(ElementalDofList, rCurrentProcessInfo);
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
}
/**
* @brief Function that returns the list of Degrees of freedom to be assembled in the system for a given condition
* @param rCondition The condition to compute
* @param rDofList The list containing the condition degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
virtual void GetDofList(
const Condition& rCondition,
Element::DofsVectorType& rDofList,
const ProcessInfo& rCurrentProcessInfo
)
{
rCondition.GetDofList(rDofList, rCurrentProcessInfo);
}
KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void GetConditionDofList(
Condition::Pointer pCurrentCondition,
Element::DofsVectorType& ConditionDofList,
ProcessInfo& rCurrentProcessInfo
)
{
KRATOS_START_IGNORING_DEPRECATED_FUNCTION_WARNING
pCurrentCondition->GetDofList(ConditionDofList, rCurrentProcessInfo);
KRATOS_STOP_IGNORING_DEPRECATED_FUNCTION_WARNING
}
/**
* @brief This method provides the default parameters to avoid conflicts between the different constructors
*/
virtual Parameters GetDefaultParameters() const
{
const Parameters default_parameters = Parameters(R"(
{
"name" : "scheme"
})" );
return default_parameters;
}
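// Sketch (an illustrative assumption, not something the base class requires):
// derived schemes usually override GetDefaultParameters() with their own
// "name" and any extra settings, relying on the protected
// ValidateAndAssignParameters() below to fill in whatever the user omits:
//
//   Parameters GetDefaultParameters() const override
//   {
//       return Parameters(R"({ "name" : "my_scheme", "echo_level" : 0 })");
//   }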
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
return "scheme";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
return "Scheme";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
rOStream << Info();
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
bool mSchemeIsInitialized; /// Flag indicating whether the Scheme has been initialized
bool mElementsAreInitialized; /// Flag indicating whether the elements were initialized correctly
bool mConditionsAreInitialized; /// Flag indicating whether the conditions were initialized correctly
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method validates and assigns default parameters
* @param ThisParameters Parameters to be validated
* @param DefaultParameters The default parameters
* @return The validated Parameters
*/
virtual Parameters ValidateAndAssignParameters(
Parameters ThisParameters,
const Parameters DefaultParameters
) const
{
ThisParameters.ValidateAndAssignDefaults(DefaultParameters);
return ThisParameters;
}
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
virtual void AssignSettings(const Parameters ThisParameters)
{
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class Scheme
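// ---------------------------------------------------------------------------
// Usage sketch (illustrative only; "MySketchScheme" and its body are
// assumptions, not part of the Kratos code base): a derived scheme keeps the
// base bookkeeping and overrides only the hooks it needs, e.g. Update():
//
// template<class TSparseSpace, class TDenseSpace>
// class MySketchScheme : public Scheme<TSparseSpace, TDenseSpace>
// {
// public:
//     typedef Scheme<TSparseSpace, TDenseSpace> BaseType;
//     void Update(ModelPart& rModelPart,
//                 typename BaseType::DofsArrayType& rDofSet,
//                 typename BaseType::TSystemMatrixType& rA,
//                 typename BaseType::TSystemVectorType& rDx,
//                 typename BaseType::TSystemVectorType& rb) override
//     {
//         // e.g. add the increment rDx to every free degree of freedom
//     }
// };
// ---------------------------------------------------------------------------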
} // namespace Kratos.
#endif /* KRATOS_SCHEME defined */
|
fig4.87-nested-parallel-mod.c | /*
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
Copyright 2009 Sun Microsystems, Inc. All rights reserved.
The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt
The BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistribution of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Sun Microsystems, Inc. or the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
This software is provided "AS IS," without a warranty of any kind. ALL
EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
You acknowledge that this software is not designed, licensed or intended for
use in the design, construction, operation or maintenance of any nuclear facility.
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#define omp_get_nested() 0
#endif
int main()
{
int TID = -1;
#ifdef _OPENMP
(void) omp_set_dynamic(FALSE);
if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
(void) omp_set_num_threads(3);
(void) omp_set_nested(TRUE);
if (! omp_get_nested()) {printf("Warning: nested parallelism not set\n");}
#endif
printf("Nested parallelism is %s\n",
omp_get_nested() ? "supported" : "not supported");
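/*
   Note: omp_set_nested()/omp_get_nested() used above are deprecated as of
   OpenMP 5.0; on newer compilers the equivalent control is the number of
   active levels, e.g.

       omp_set_max_active_levels(2);   // allow two active parallel levels

   The original calls are kept here unchanged.
*/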
/*
------------------------------------------------------------------------
Inside the parallel region we can distinguish between the threads
------------------------------------------------------------------------
*/
#pragma omp parallel private(TID)
{
TID = omp_get_thread_num();
printf("Thread %d executes the outer parallel region\n",TID);
#pragma omp parallel num_threads(2) firstprivate(TID)
{
printf("TID %d: Thread %d executes inner parallel region\n",
TID,omp_get_thread_num());
} /*-- End of inner parallel region --*/
} /*-- End of outer parallel region --*/
return(0);
}
|
interp2.c | /*
* Academic License - for use in teaching, academic research, and meeting
* course requirements at degree granting institutions only. Not for
* government, commercial, or other organizational use.
*
* interp2.c
*
* Code generation for function 'interp2'
*
*/
/* Include files */
#include "interp2.h"
#include "eml_int_forloop_overflow_check.h"
#include "no_raytracing_latlon_data.h"
#include "no_raytracing_latlon_emxutil.h"
#include "no_raytracing_latlon_types.h"
#include "rt_nonfinite.h"
#include "mwmathutil.h"
/* Variable Definitions */
static emlrtRSInfo gd_emlrtRSI = { 274,/* lineNo */
"interp2_local", /* fcnName */
"C:\\Program Files\\MATLAB\\R2020b\\toolbox\\eml\\lib\\matlab\\polyfun\\interp2.m"/* pathName */
};
static emlrtRTEInfo jg_emlrtRTEI = { 268,/* lineNo */
21, /* colNo */
"interp2", /* fName */
"C:\\Program Files\\MATLAB\\R2020b\\toolbox\\eml\\lib\\matlab\\polyfun\\interp2.m"/* pName */
};
/* Function Definitions */
void interp2_local(const emlrtStack *sp, const emxArray_real_T *V, const
emxArray_real_T *Xq, const emxArray_real_T *Yq,
emxArray_real_T *Vq)
{
jmp_buf * volatile emlrtJBStack;
emlrtStack b_st;
emlrtStack st;
real_T qx1;
real_T qx2;
real_T rx;
real_T ry;
real_T zx1y2;
int32_T ix;
int32_T ixmax;
int32_T iy;
int32_T iymax;
int32_T k;
int32_T ub_loop;
st.prev = sp;
st.tls = sp->tls;
b_st.prev = &st;
b_st.tls = st.tls;
ixmax = Vq->size[0];
Vq->size[0] = Xq->size[0];
emxEnsureCapacity_real_T(sp, Vq, ixmax, &jg_emlrtRTEI);
ixmax = V->size[1] - 1;
iymax = V->size[0] - 1;
st.site = &gd_emlrtRSI;
if ((1 <= Xq->size[0]) && (Xq->size[0] > 2147483646)) {
b_st.site = &x_emlrtRSI;
check_forloop_overflow_error(&b_st);
}
ub_loop = Xq->size[0] - 1;
emlrtEnterParallelRegion(sp, omp_in_parallel());
emlrtPushJmpBuf(sp, &emlrtJBStack);
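/* The parallel loop below performs bilinear interpolation for each query
   point (Xq[k], Yq[k]): the enclosing cell indices (ix, iy) are clamped to
   the grid, the four corner values of V are blended first along x (weight
   rx) and then along y (weight ry), and queries outside the grid produce
   NaN. Each iteration is independent, which is what makes the
   "omp parallel for" legal here. */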
#pragma omp parallel for \
num_threads(emlrtAllocRegionTLSs(sp->tls, omp_in_parallel(), omp_get_max_threads(), omp_get_num_procs())) \
private(ix,iy,ry,qx1,zx1y2,qx2,rx)
for (k = 0; k <= ub_loop; k++) {
if ((Xq->data[k] >= 1.0) && (Xq->data[k] <= V->size[1]) && (Yq->data[k] >=
1.0) && (Yq->data[k] <= V->size[0])) {
if (Xq->data[k] <= 1.0) {
ix = 1;
} else if (Xq->data[k] <= ixmax) {
ix = (int32_T)muDoubleScalarFloor(Xq->data[k]);
} else {
ix = ixmax;
}
if (Yq->data[k] <= 1.0) {
iy = 1;
} else if (Yq->data[k] <= iymax) {
iy = (int32_T)muDoubleScalarFloor(Yq->data[k]);
} else {
iy = iymax;
}
ry = V->data[(iy + V->size[0] * (ix - 1)) - 1];
qx1 = V->data[(iy + V->size[0] * ix) - 1];
zx1y2 = V->data[iy + V->size[0] * (ix - 1)];
qx2 = V->data[iy + V->size[0] * ix];
if (Xq->data[k] == ix) {
qx1 = ry;
qx2 = zx1y2;
} else {
if (!(Xq->data[k] == (real_T)ix + 1.0)) {
rx = (Xq->data[k] - (real_T)ix) / (((real_T)ix + 1.0) - (real_T)ix);
if (ry == qx1) {
qx1 = ry;
} else {
qx1 = (1.0 - rx) * ry + rx * qx1;
}
if (zx1y2 == qx2) {
qx2 = zx1y2;
} else {
qx2 = (1.0 - rx) * zx1y2 + rx * qx2;
}
}
}
if ((Yq->data[k] == iy) || (qx1 == qx2)) {
Vq->data[k] = qx1;
} else if (Yq->data[k] == (real_T)iy + 1.0) {
Vq->data[k] = qx2;
} else {
ry = (Yq->data[k] - (real_T)iy) / (((real_T)iy + 1.0) - (real_T)iy);
Vq->data[k] = (1.0 - ry) * qx1 + ry * qx2;
}
} else {
Vq->data[k] = rtNaN;
}
}
emlrtPopJmpBuf(sp, &emlrtJBStack);
emlrtExitParallelRegion(sp, omp_in_parallel());
}
/* End of code generation (interp2.c) */
|
03_loop_decompose_2.c | #include <stdio.h>
#include <omp.h>
#define MAX_ITS 10000
int main()
{
int nproc, i, sum;
nproc = omp_get_max_threads();
int its_per_proc[nproc];
for (i = 0; i< nproc; ++i){
its_per_proc[i] = 0;
}
#pragma omp parallel for
for (i = 0; i< MAX_ITS; ++i){
its_per_proc[omp_get_thread_num()]++;
}
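/* Note: the loop above is race-free (each thread increments only its own
   slot), but neighbouring its_per_proc[] entries share cache lines, so it
   can be slowed down by false sharing. A common fix -- a sketch assuming
   64-byte cache lines -- is to pad each counter:

       struct padded_count { int count; char pad[60]; };
*/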
sum = 0;
for (i = 0; i< nproc; ++i){
printf("Processor %i performed %i iterations\n", i, its_per_proc[i]);
sum += its_per_proc[i];
}
printf("Total work on all processors is %i\n", sum);
return 0;
}
|
par_add_cycle.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* ParAMG cycling routine
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGCycle
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGAdditiveCycle( void *amg_vdata)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
/* Data Structure variables */
hypre_ParCSRMatrix **A_array;
hypre_ParCSRMatrix **P_array;
hypre_ParCSRMatrix **R_array;
hypre_ParCSRMatrix *Lambda;
hypre_ParCSRMatrix *Atilde;
hypre_ParVector **F_array;
hypre_ParVector **U_array;
hypre_ParVector *Vtemp;
hypre_ParVector *Ztemp;
hypre_ParVector *Xtilde, *Rtilde;
HYPRE_Int **CF_marker_array;
HYPRE_Int num_levels;
HYPRE_Int addlvl, add_end;
HYPRE_Int additive;
HYPRE_Int mult_additive;
HYPRE_Int simple;
HYPRE_Int add_last_lvl;
HYPRE_Int i, j, num_rows;
HYPRE_Int n_global;
HYPRE_Int rlx_order;
/* Local variables */
HYPRE_Int Solve_err_flag = 0;
HYPRE_Int level;
HYPRE_Int coarse_grid;
HYPRE_Int fine_grid;
HYPRE_Int rlx_down;
HYPRE_Int rlx_up;
HYPRE_Int rlx_coarse;
HYPRE_Int *grid_relax_type;
HYPRE_Int *num_grid_sweeps;
hypre_Vector **l1_norms;
HYPRE_Real alpha, beta;
HYPRE_Real *u_data;
HYPRE_Real *v_data;
hypre_Vector *l1_norms_lvl;
HYPRE_Real *D_inv;
HYPRE_Real *x_global;
HYPRE_Real *r_global;
HYPRE_Real *relax_weight;
HYPRE_Real *omega;
#if 0
HYPRE_Real *D_mat;
HYPRE_Real *S_vec;
#endif
HYPRE_ANNOTATE_FUNC_BEGIN;
/* Acquire data and allocate storage */
A_array = hypre_ParAMGDataAArray(amg_data);
F_array = hypre_ParAMGDataFArray(amg_data);
U_array = hypre_ParAMGDataUArray(amg_data);
P_array = hypre_ParAMGDataPArray(amg_data);
R_array = hypre_ParAMGDataRArray(amg_data);
CF_marker_array = hypre_ParAMGDataCFMarkerArray(amg_data);
Vtemp = hypre_ParAMGDataVtemp(amg_data);
Ztemp = hypre_ParAMGDataZtemp(amg_data);
num_levels = hypre_ParAMGDataNumLevels(amg_data);
additive = hypre_ParAMGDataAdditive(amg_data);
mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
simple = hypre_ParAMGDataSimple(amg_data);
add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);
Lambda = hypre_ParAMGDataLambda(amg_data);
Atilde = hypre_ParAMGDataAtilde(amg_data);
Xtilde = hypre_ParAMGDataXtilde(amg_data);
Rtilde = hypre_ParAMGDataRtilde(amg_data);
l1_norms = hypre_ParAMGDataL1Norms(amg_data);
D_inv = hypre_ParAMGDataDinv(amg_data);
relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
omega = hypre_ParAMGDataOmega(amg_data);
rlx_order = hypre_ParAMGDataRelaxOrder(amg_data);
num_grid_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data);
/* Initialize */
addlvl = hypre_max(additive, mult_additive);
addlvl = hypre_max(addlvl, simple);
if (add_last_lvl == -1) add_end = num_levels-1;
else add_end = add_last_lvl;
Solve_err_flag = 0;
/*---------------------------------------------------------------------
* Main loop of cycling --- multiplicative version --- V-cycle
*--------------------------------------------------------------------*/
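/* Reading guide for the loop below: levels outside the additive window
   [addlvl, add_end] are treated multiplicatively (smooth, form the
   residual, restrict), while levels inside it skip the smoothing and only
   restrict the residual into Rtilde; the additive smoothing for all those
   levels is then applied in one shot after the loop, either as
   x += D^{-1} r (simple version) or through the combined matrix Lambda,
   before the prolongation (up) cycle. */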
/* down cycle */
rlx_down = grid_relax_type[1];
rlx_up = grid_relax_type[2];
rlx_coarse = grid_relax_type[3];
for (level = 0; level < num_levels-1; level++)
{
HYPRE_ANNOTATE_MGLEVEL_BEGIN(level);
fine_grid = level;
coarse_grid = level + 1;
u_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[fine_grid]));
v_data = hypre_VectorData(hypre_ParVectorLocalVector(Vtemp));
l1_norms_lvl = l1_norms[level];
hypre_ParVectorSetConstantValues(U_array[coarse_grid], 0.0);
if (level < addlvl || level > add_end) /* multiplicative version */
{
/* smoothing step */
if (rlx_down == 0)
{
HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
for (j=0; j < num_grid_sweeps[1]; j++)
{
hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
u_data[i] = relax_weight[level]*v_data[i] / A_data[A_i[i]];
}
}
else if (rlx_down != 18)
{
/*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_down,0,*/
for (j=0; j < num_grid_sweeps[1]; j++)
{
hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
CF_marker_array[fine_grid], rlx_down,rlx_order,1,
relax_weight[fine_grid], omega[fine_grid],
l1_norms[level] ? hypre_VectorData(l1_norms[level]) : NULL,
U_array[fine_grid], Vtemp, Ztemp);
hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
}
}
else
{
num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
for (j=0; j < num_grid_sweeps[1]; j++)
{
hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
u_data[i] += v_data[i] / hypre_VectorData(l1_norms_lvl)[i];
}
}
}
alpha = -1.0;
beta = 1.0;
hypre_ParCSRMatrixMatvec(alpha, A_array[fine_grid], U_array[fine_grid],
beta, Vtemp);
alpha = 1.0;
beta = 0.0;
hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
beta,F_array[coarse_grid]);
}
else /* additive version */
{
hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
if (level == 0) /* compute residual */
{
hypre_ParVectorCopy(Vtemp, Rtilde);
hypre_ParVectorCopy(U_array[fine_grid],Xtilde);
}
alpha = 1.0;
beta = 0.0;
hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
beta,F_array[coarse_grid]);
}
HYPRE_ANNOTATE_MGLEVEL_END(level);
}
/* additive smoothing and solve coarse grid */
HYPRE_ANNOTATE_MGLEVEL_BEGIN(num_levels - 1);
if (addlvl < num_levels)
{
if (simple > -1)
{
x_global = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
r_global = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Xtilde));
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_global; i++)
x_global[i] += D_inv[i]*r_global[i];
}
else
{
if (num_grid_sweeps[1] > 1)
{
n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Rtilde));
hypre_ParVector *Tmptilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
hypre_Vector *Tmptilde_local = hypre_SeqVectorCreate(n_global);
hypre_SeqVectorInitialize(Tmptilde_local);
hypre_ParVectorLocalVector(Tmptilde) = Tmptilde_local;
hypre_ParVectorOwnsData(Tmptilde) = 1;
hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 0.0, Tmptilde);
hypre_ParVectorScale(2.0,Rtilde);
hypre_ParCSRMatrixMatvec(-1.0, Atilde, Tmptilde, 1.0, Rtilde);
hypre_ParVectorDestroy(Tmptilde);
}
hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 1.0, Xtilde);
}
if (addlvl == 0) hypre_ParVectorCopy(Xtilde, U_array[0]);
}
if (add_end < num_levels -1)
{
fine_grid = num_levels -1;
for (j=0; j < num_grid_sweeps[3]; j++)
if (rlx_coarse == 18)
hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
1, 1,
l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
1.0, 1.0 ,0,0,0,0,
U_array[fine_grid], Vtemp, Ztemp);
else
hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
NULL, rlx_coarse,0,0,
relax_weight[fine_grid], omega[fine_grid],
l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
U_array[fine_grid], Vtemp, Ztemp);
}
HYPRE_ANNOTATE_MGLEVEL_END(num_levels - 1);
/* up cycle */
for (level = num_levels-1; level > 0; level--)
{
HYPRE_ANNOTATE_MGLEVEL_BEGIN(level);
fine_grid = level - 1;
coarse_grid = level;
if (level <= addlvl || level > add_end+1) /* multiplicative version */
{
alpha = 1.0;
beta = 1.0;
hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
U_array[coarse_grid],
beta, U_array[fine_grid]);
if (rlx_up != 18)
/*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_up,0,*/
for (j=0; j < num_grid_sweeps[2]; j++)
hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
CF_marker_array[fine_grid],
rlx_up,rlx_order,2,
relax_weight[fine_grid], omega[fine_grid],
l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
U_array[fine_grid], Vtemp, Ztemp);
else if (rlx_order)
{
HYPRE_Int loc_relax_points[2];
loc_relax_points[0] = -1;
loc_relax_points[1] = 1;
for (j=0; j < num_grid_sweeps[2]; j++)
for (i=0; i < 2; i++)
hypre_ParCSRRelax_L1_Jacobi(A_array[fine_grid],F_array[fine_grid],
CF_marker_array[fine_grid],
loc_relax_points[i],
1.0,
l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
U_array[fine_grid], Vtemp);
}
else
for (j=0; j < num_grid_sweeps[2]; j++)
hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
1, 1,
l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
1.0, 1.0 ,0,0,0,0,
U_array[fine_grid], Vtemp, Ztemp);
}
else /* additive version */
{
alpha = 1.0;
beta = 1.0;
hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
U_array[coarse_grid],
beta, U_array[fine_grid]);
}
HYPRE_ANNOTATE_MGLEVEL_END(level);
}
HYPRE_ANNOTATE_FUNC_END;
return(Solve_err_flag);
}
HYPRE_Int hypre_CreateLambda(void *amg_vdata)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
/* Data Structure variables */
MPI_Comm comm;
hypre_ParCSRMatrix **A_array;
hypre_ParVector **F_array;
hypre_ParVector **U_array;
hypre_ParCSRMatrix *A_tmp;
hypre_ParCSRMatrix *Lambda;
hypre_CSRMatrix *L_diag;
hypre_CSRMatrix *L_offd;
hypre_ParCSRMatrix *Atilde;
hypre_CSRMatrix *Atilde_diag;
hypre_CSRMatrix *Atilde_offd;
HYPRE_Real *Atilde_diag_data;
HYPRE_Real *Atilde_offd_data;
hypre_CSRMatrix *A_tmp_diag;
hypre_CSRMatrix *A_tmp_offd;
hypre_ParVector *Xtilde;
hypre_ParVector *Rtilde;
hypre_Vector *Xtilde_local;
hypre_Vector *Rtilde_local;
hypre_ParCSRCommPkg *comm_pkg;
hypre_ParCSRCommPkg *L_comm_pkg = NULL;
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Real *L_diag_data;
HYPRE_Real *L_offd_data;
HYPRE_Real *buf_data = NULL;
HYPRE_Real *tmp_data;
HYPRE_Real *x_data;
HYPRE_Real *r_data;
hypre_Vector *l1_norms;
HYPRE_Real *A_tmp_diag_data;
HYPRE_Real *A_tmp_offd_data;
HYPRE_Real *D_data = NULL;
HYPRE_Real *D_data_offd = NULL;
HYPRE_Int *L_diag_i;
HYPRE_Int *L_diag_j;
HYPRE_Int *L_offd_i;
HYPRE_Int *L_offd_j;
HYPRE_Int *Atilde_diag_i;
HYPRE_Int *Atilde_diag_j;
HYPRE_Int *Atilde_offd_i;
HYPRE_Int *Atilde_offd_j;
HYPRE_Int *A_tmp_diag_i;
HYPRE_Int *A_tmp_offd_i;
HYPRE_Int *A_tmp_diag_j;
HYPRE_Int *A_tmp_offd_j;
HYPRE_Int *L_recv_ptr = NULL;
HYPRE_Int *L_send_ptr = NULL;
HYPRE_Int *L_recv_procs = NULL;
HYPRE_Int *L_send_procs = NULL;
HYPRE_Int *L_send_map_elmts = NULL;
HYPRE_Int *recv_procs;
HYPRE_Int *send_procs;
HYPRE_Int *send_map_elmts;
HYPRE_Int *send_map_starts;
HYPRE_Int *recv_vec_starts;
HYPRE_Int *all_send_procs = NULL;
HYPRE_Int *all_recv_procs = NULL;
HYPRE_Int *remap = NULL;
HYPRE_Int *level_start;
HYPRE_Int addlvl;
HYPRE_Int additive;
HYPRE_Int mult_additive;
HYPRE_Int num_levels;
HYPRE_Int num_add_lvls;
HYPRE_Int num_procs;
HYPRE_Int num_sends, num_recvs;
HYPRE_Int num_sends_L = 0;
HYPRE_Int num_recvs_L = 0;
HYPRE_Int send_data_L = 0;
HYPRE_Int num_rows_L = 0;
HYPRE_Int num_rows_tmp = 0;
HYPRE_Int num_cols_offd_L = 0;
HYPRE_Int num_cols_offd = 0;
HYPRE_Int level, i, j, k;
HYPRE_Int this_proc, cnt, cnt_diag, cnt_offd;
HYPRE_Int A_cnt_diag, A_cnt_offd;
HYPRE_Int cnt_recv, cnt_send, cnt_row, row_start;
HYPRE_Int start_diag, start_offd, indx, cnt_map;
HYPRE_Int start, j_indx, index, cnt_level;
HYPRE_Int max_sends, max_recvs;
HYPRE_Int ns;
/* Local variables */
HYPRE_Int Solve_err_flag = 0;
HYPRE_Int num_nonzeros_diag;
HYPRE_Int num_nonzeros_offd;
hypre_Vector **l1_norms_ptr = NULL;
/*HYPRE_Real *relax_weight = NULL;
HYPRE_Int relax_type; */
HYPRE_Int add_rlx;
HYPRE_Int add_last_lvl, add_end;
HYPRE_Real add_rlx_wt;
/* Acquire data and allocate storage */
A_array = hypre_ParAMGDataAArray(amg_data);
F_array = hypre_ParAMGDataFArray(amg_data);
U_array = hypre_ParAMGDataUArray(amg_data);
additive = hypre_ParAMGDataAdditive(amg_data);
mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
num_levels = hypre_ParAMGDataNumLevels(amg_data);
/*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/
comm = hypre_ParCSRMatrixComm(A_array[0]);
add_rlx = hypre_ParAMGDataAddRelaxType(amg_data);
add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data);
ns = hypre_ParAMGDataNumGridSweeps(amg_data)[1];
hypre_MPI_Comm_size(comm,&num_procs);
l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data);
addlvl = hypre_max(additive, mult_additive);
if (add_last_lvl != -1) add_end = add_last_lvl+1;
else add_end = num_levels;
num_add_lvls = add_end+1-addlvl;
level_start = hypre_CTAlloc(HYPRE_Int, num_add_lvls+1, HYPRE_MEMORY_HOST);
send_data_L = 0;
num_rows_L = 0;
num_cols_offd_L = 0;
num_nonzeros_diag = 0;
num_nonzeros_offd = 0;
level_start[0] = 0;
cnt = 1;
max_sends = 0;
max_recvs = 0;
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp);
A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd);
num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
num_cols_offd = hypre_CSRMatrixNumCols(A_tmp_offd);
num_rows_L += num_rows_tmp;
level_start[cnt] = level_start[cnt-1] + num_rows_tmp;
cnt++;
num_cols_offd_L += num_cols_offd;
num_nonzeros_diag += A_tmp_diag_i[num_rows_tmp];
num_nonzeros_offd += A_tmp_offd_i[num_rows_tmp];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
max_sends += num_sends;
if (num_sends)
send_data_L += hypre_ParCSRCommPkgSendMapStart(comm_pkg,num_sends);
max_recvs += hypre_ParCSRCommPkgNumRecvs(comm_pkg);
}
}
if (max_sends >= num_procs || max_recvs >= num_procs)
{
max_sends = num_procs;
max_recvs = num_procs;
}
if (max_sends) all_send_procs = hypre_CTAlloc(HYPRE_Int, max_sends, HYPRE_MEMORY_HOST);
if (max_recvs) all_recv_procs = hypre_CTAlloc(HYPRE_Int, max_recvs, HYPRE_MEMORY_HOST);
cnt_send = 0;
cnt_recv = 0;
if (max_sends || max_recvs)
{
if (max_sends < num_procs && max_recvs < num_procs)
{
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
for (j = 0; j < num_sends; j++)
all_send_procs[cnt_send++] = send_procs[j];
for (j = 0; j < num_recvs; j++)
all_recv_procs[cnt_recv++] = recv_procs[j];
}
}
if (max_sends)
{
hypre_qsort0(all_send_procs, 0, max_sends-1);
num_sends_L = 1;
this_proc = all_send_procs[0];
for (i=1; i < max_sends; i++)
{
if (all_send_procs[i] > this_proc)
{
this_proc = all_send_procs[i];
all_send_procs[num_sends_L++] = this_proc;
}
}
L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST);
for (j=0; j < num_sends_L; j++)
L_send_procs[j] = all_send_procs[j];
hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST);
}
if (max_recvs)
{
hypre_qsort0(all_recv_procs, 0, max_recvs-1);
num_recvs_L = 1;
this_proc = all_recv_procs[0];
for (i=1; i < max_recvs; i++)
{
if (all_recv_procs[i] > this_proc)
{
this_proc = all_recv_procs[i];
all_recv_procs[num_recvs_L++] = this_proc;
}
}
L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST);
for (j=0; j < num_recvs_L; j++)
L_recv_procs[j] = all_recv_procs[j];
hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST);
}
L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST);
L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST);
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
}
else
{
num_sends = 0;
num_recvs = 0;
}
for (k = 0; k < num_sends; k++)
{
this_proc = hypre_BinarySearch(L_send_procs,send_procs[k],num_sends_L);
L_send_ptr[this_proc+1] += send_map_starts[k+1]-send_map_starts[k];
}
for (k = 0; k < num_recvs; k++)
{
this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[k],num_recvs_L);
L_recv_ptr[this_proc+1] += recv_vec_starts[k+1]-recv_vec_starts[k];
}
}
L_recv_ptr[0] = 0;
for (i=1; i < num_recvs_L; i++)
L_recv_ptr[i+1] += L_recv_ptr[i];
L_send_ptr[0] = 0;
for (i=1; i < num_sends_L; i++)
L_send_ptr[i+1] += L_send_ptr[i];
}
else
{
num_recvs_L = 0;
num_sends_L = 0;
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
for (j = 0; j < num_sends; j++)
{
this_proc = send_procs[j];
if (all_send_procs[this_proc] == 0)
num_sends_L++;
all_send_procs[this_proc] += send_map_starts[j+1]-send_map_starts[j];
}
for (j = 0; j < num_recvs; j++)
{
this_proc = recv_procs[j];
if (all_recv_procs[this_proc] == 0)
num_recvs_L++;
all_recv_procs[this_proc] += recv_vec_starts[j+1]-recv_vec_starts[j];
}
}
}
if (max_sends)
{
L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST);
L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST);
num_sends_L = 0;
for (j=0; j < num_procs; j++)
{
this_proc = all_send_procs[j];
if (this_proc)
{
L_send_procs[num_sends_L++] = j;
L_send_ptr[num_sends_L] = this_proc + L_send_ptr[num_sends_L-1];
}
}
}
if (max_recvs)
{
L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST);
L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST);
num_recvs_L = 0;
for (j=0; j < num_procs; j++)
{
this_proc = all_recv_procs[j];
if (this_proc)
{
L_recv_procs[num_recvs_L++] = j;
L_recv_ptr[num_recvs_L] = this_proc + L_recv_ptr[num_recvs_L-1];
}
}
}
}
}
if (max_sends) hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST);
if (max_recvs) hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST);
L_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag);
L_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd);
hypre_CSRMatrixInitialize(L_diag);
hypre_CSRMatrixInitialize(L_offd);
if (num_nonzeros_diag)
{
L_diag_data = hypre_CSRMatrixData(L_diag);
L_diag_j = hypre_CSRMatrixJ(L_diag);
}
L_diag_i = hypre_CSRMatrixI(L_diag);
if (num_nonzeros_offd)
{
L_offd_data = hypre_CSRMatrixData(L_offd);
L_offd_j = hypre_CSRMatrixJ(L_offd);
}
L_offd_i = hypre_CSRMatrixI(L_offd);
if (ns > 1)
{
Atilde_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag);
Atilde_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd);
hypre_CSRMatrixInitialize(Atilde_diag);
hypre_CSRMatrixInitialize(Atilde_offd);
if (num_nonzeros_diag)
{
Atilde_diag_data = hypre_CSRMatrixData(Atilde_diag);
Atilde_diag_j = hypre_CSRMatrixJ(Atilde_diag);
}
Atilde_diag_i = hypre_CSRMatrixI(Atilde_diag);
if (num_nonzeros_offd)
{
Atilde_offd_data = hypre_CSRMatrixData(Atilde_offd);
Atilde_offd_j = hypre_CSRMatrixJ(Atilde_offd);
}
Atilde_offd_i = hypre_CSRMatrixI(Atilde_offd);
}
if (num_rows_L) D_data = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST);
if (send_data_L)
{
L_send_map_elmts = hypre_CTAlloc(HYPRE_Int, send_data_L, HYPRE_MEMORY_HOST);
buf_data = hypre_CTAlloc(HYPRE_Real, send_data_L, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_L)
{
D_data_offd = hypre_CTAlloc(HYPRE_Real, num_cols_offd_L, HYPRE_MEMORY_HOST);
/*L_col_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L);*/
remap = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L, HYPRE_MEMORY_HOST);
}
Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
Rtilde_local = hypre_SeqVectorCreate(num_rows_L);
hypre_SeqVectorInitialize(Rtilde_local);
hypre_ParVectorLocalVector(Rtilde) = Rtilde_local;
hypre_ParVectorOwnsData(Rtilde) = 1;
Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
Xtilde_local = hypre_SeqVectorCreate(num_rows_L);
hypre_SeqVectorInitialize(Xtilde_local);
hypre_ParVectorLocalVector(Xtilde) = Xtilde_local;
hypre_ParVectorOwnsData(Xtilde) = 1;
x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
cnt = 0;
cnt_level = 0;
cnt_diag = 0;
cnt_offd = 0;
cnt_row = 1;
L_diag_i[0] = 0;
L_offd_i[0] = 0;
if (ns > 1)
{
A_cnt_diag = 0;
A_cnt_offd = 0;
Atilde_diag_i[0] = 0;
Atilde_offd_i[0] = 0;
}
for (level=addlvl; level < add_end; level++)
{
row_start = level_start[cnt_level];
if (level != 0)
{
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level]));
if (tmp_data)
{
hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(F_array[level])));
}
hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[row_start];
hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0;
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level]));
if (tmp_data)
{
hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(U_array[level])));
}
hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[row_start];
hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0;
}
cnt_level++;
start_diag = L_diag_i[cnt_row-1];
start_offd = L_offd_i[cnt_row-1];
A_tmp = A_array[level];
A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp);
comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd);
A_tmp_diag_j = hypre_CSRMatrixJ(A_tmp_diag);
A_tmp_offd_j = hypre_CSRMatrixJ(A_tmp_offd);
A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag);
A_tmp_offd_data = hypre_CSRMatrixData(A_tmp_offd);
num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
if (comm_pkg)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
}
else
{
num_sends = 0;
num_recvs = 0;
}
/* Compute new combined communication package */
for (i=0; i < num_sends; i++)
{
this_proc = hypre_BinarySearch(L_send_procs,send_procs[i],num_sends_L);
indx = L_send_ptr[this_proc];
for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++)
{
L_send_map_elmts[indx++] = row_start + send_map_elmts[j];
}
L_send_ptr[this_proc] = indx;
}
cnt_map = 0;
for (i = 0; i < num_recvs; i++)
{
this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[i],num_recvs_L);
indx = L_recv_ptr[this_proc];
for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
{
remap[cnt_map++] = indx++;
}
L_recv_ptr[this_proc] = indx;
}
/* Compute Lambda */
if (add_rlx == 0)
{
/*HYPRE_Real rlx_wt = relax_weight[level];*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_rows_tmp; i++)
{
D_data[i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]];
L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
if (ns > 1)
for (i=0; i < num_rows_tmp; i++)
{
Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
}
else
{
l1_norms = l1_norms_ptr[level];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_rows_tmp; i++)
{
D_data[i] = 1.0 / hypre_VectorData(l1_norms)[i];
L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
if (ns > 1)
{
for (i=0; i < num_rows_tmp; i++)
{
Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
}
}
}
if (num_procs > 1)
{
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_starts[i];
for (j=start; j < send_map_starts[i+1]; j++)
buf_data[index++] = D_data[send_map_elmts[j]];
}
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg,
buf_data, D_data_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
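   /* Fill the smoother matrix Lambda = 2D - DAD, where D holds the
      relaxation weights (or inverse l1 norms) computed above:
      diagonal entries (2 - a_ii d_i) d_i, off-diagonals -a_ij d_j d_i. */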
for (i = 0; i < num_rows_tmp; i++)
{
j_indx = A_tmp_diag_i[i];
if (ns > 1)
{
Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j_indx];
Atilde_diag_j[A_cnt_diag++] = i+row_start;
}
L_diag_data[cnt_diag] = (2.0 - A_tmp_diag_data[j_indx]*D_data[i])*D_data[i];
L_diag_j[cnt_diag++] = i+row_start;
for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++)
{
j_indx = A_tmp_diag_j[j];
L_diag_data[cnt_diag] = (- A_tmp_diag_data[j]*D_data[j_indx])*D_data[i];
L_diag_j[cnt_diag++] = j_indx+row_start;
}
for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++)
{
j_indx = A_tmp_offd_j[j];
L_offd_data[cnt_offd] = (- A_tmp_offd_data[j]*D_data_offd[j_indx])*D_data[i];
L_offd_j[cnt_offd++] = remap[j_indx];
}
if (ns > 1)
{
for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++)
{
j_indx = A_tmp_diag_j[j];
Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j];
Atilde_diag_j[A_cnt_diag++] = j_indx+row_start;
}
for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++)
{
j_indx = A_tmp_offd_j[j];
Atilde_offd_data[A_cnt_offd] = A_tmp_offd_data[j];
Atilde_offd_j[A_cnt_offd++] = remap[j_indx];
}
}
}
cnt_row += num_rows_tmp;
}
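   /* The fill loops above advanced L_send_ptr/L_recv_ptr to the end of
      each bucket; shift the entries back by one position to restore the
      starting offsets. */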
if (L_send_ptr)
{
for (i=num_sends_L-1; i > 0; i--)
L_send_ptr[i] = L_send_ptr[i-1];
L_send_ptr[0] = 0;
}
else
L_send_ptr = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
if (L_recv_ptr)
{
for (i=num_recvs_L-1; i > 0; i--)
L_recv_ptr[i] = L_recv_ptr[i-1];
L_recv_ptr[0] = 0;
}
else
L_recv_ptr = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
L_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgNumRecvs(L_comm_pkg) = num_recvs_L;
hypre_ParCSRCommPkgNumSends(L_comm_pkg) = num_sends_L;
hypre_ParCSRCommPkgRecvProcs(L_comm_pkg) = L_recv_procs;
hypre_ParCSRCommPkgSendProcs(L_comm_pkg) = L_send_procs;
hypre_ParCSRCommPkgRecvVecStarts(L_comm_pkg) = L_recv_ptr;
hypre_ParCSRCommPkgSendMapStarts(L_comm_pkg) = L_send_ptr;
hypre_ParCSRCommPkgSendMapElmts(L_comm_pkg) = L_send_map_elmts;
hypre_ParCSRCommPkgComm(L_comm_pkg) = comm;
Lambda = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDiag(Lambda) = L_diag;
hypre_ParCSRMatrixOffd(Lambda) = L_offd;
hypre_ParCSRMatrixCommPkg(Lambda) = L_comm_pkg;
hypre_ParCSRMatrixComm(Lambda) = comm;
hypre_ParCSRMatrixOwnsData(Lambda) = 1;
if (ns > 1)
{
/*hypre_ParCSRCommPkg *A_comm_pkg = NULL;
HYPRE_Int *A_recv_ptr = NULL;
HYPRE_Int *A_send_ptr = NULL;
HYPRE_Int *A_recv_procs = NULL;
HYPRE_Int *A_send_procs = NULL;
HYPRE_Int *A_send_map_elmts = NULL;
A_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
A_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
A_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
A_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST);
A_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST);
A_send_map_elmts = hypre_CTAlloc(HYPRE_Int, L_send_ptr[num_sends_L], HYPRE_MEMORY_HOST);
for (i=0; i<num_recvs_L+1; i++)
A_recv_ptr[i] = L_recv_ptr[i];
for (i=0; i<num_sends_L+1; i++)
A_send_ptr[i] = L_send_ptr[i];
for (i=0; i<num_recvs_L; i++)
A_recv_procs[i] = L_recv_procs[i];
for (i=0; i<num_sends_L; i++)
A_send_procs[i] = L_send_procs[i];
for (i=0; i < L_send_ptr[num_sends_L]; i++)
A_send_map_elmts[i] = L_send_map_elmts[i];
hypre_ParCSRCommPkgNumRecvs(A_comm_pkg) = num_recvs_L;
hypre_ParCSRCommPkgNumSends(A_comm_pkg) = num_sends_L;
hypre_ParCSRCommPkgRecvProcs(A_comm_pkg) = A_recv_procs;
hypre_ParCSRCommPkgSendProcs(A_comm_pkg) = A_send_procs;
hypre_ParCSRCommPkgRecvVecStarts(A_comm_pkg) = A_recv_ptr;
hypre_ParCSRCommPkgSendMapStarts(A_comm_pkg) = A_send_ptr;
hypre_ParCSRCommPkgSendMapElmts(A_comm_pkg) = A_send_map_elmts;
hypre_ParCSRCommPkgComm(A_comm_pkg) = comm; */
Atilde = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDiag(Atilde) = Atilde_diag;
hypre_ParCSRMatrixOffd(Atilde) = Atilde_offd;
hypre_ParCSRMatrixCommPkg(Atilde) = L_comm_pkg;
hypre_ParCSRMatrixComm(Atilde) = comm;
hypre_ParCSRMatrixOwnsData(Atilde) = 1;
hypre_ParAMGDataAtilde(amg_data) = Atilde;
}
hypre_ParAMGDataLambda(amg_data) = Lambda;
hypre_ParAMGDataRtilde(amg_data) = Rtilde;
hypre_ParAMGDataXtilde(amg_data) = Xtilde;
hypre_TFree(D_data_offd, HYPRE_MEMORY_HOST);
hypre_TFree(D_data, HYPRE_MEMORY_HOST);
hypre_TFree(remap, HYPRE_MEMORY_HOST);
hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(level_start, HYPRE_MEMORY_HOST);
return Solve_err_flag;
}
HYPRE_Int hypre_CreateDinv(void *amg_vdata)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
/* Data Structure variables */
hypre_ParCSRMatrix **A_array;
hypre_ParVector **F_array;
hypre_ParVector **U_array;
hypre_ParCSRMatrix *A_tmp;
hypre_CSRMatrix *A_tmp_diag;
hypre_ParVector *Xtilde;
hypre_ParVector *Rtilde;
hypre_Vector *Xtilde_local;
hypre_Vector *Rtilde_local;
HYPRE_Real *x_data;
HYPRE_Real *r_data;
HYPRE_Real *tmp_data;
HYPRE_Real *D_inv = NULL;
/*HYPRE_Real *relax_weight = NULL;
HYPRE_Real relax_type;*/
HYPRE_Int addlvl;
HYPRE_Int num_levels;
HYPRE_Int num_rows_L;
HYPRE_Int num_rows_tmp;
HYPRE_Int level, i;
HYPRE_Int add_rlx;
HYPRE_Real add_rlx_wt;
HYPRE_Int add_last_lvl, add_end;
/* Local variables */
HYPRE_Int Solve_err_flag = 0;
hypre_Vector **l1_norms_ptr = NULL;
hypre_Vector *l1_norms;
HYPRE_Int l1_start;
/* Acquire data and allocate storage */
A_array = hypre_ParAMGDataAArray(amg_data);
F_array = hypre_ParAMGDataFArray(amg_data);
U_array = hypre_ParAMGDataUArray(amg_data);
addlvl = hypre_ParAMGDataSimple(amg_data);
num_levels = hypre_ParAMGDataNumLevels(amg_data);
add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data);
add_rlx = hypre_ParAMGDataAddRelaxType(amg_data);
add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
/*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/
l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data);
/* smooth_option = hypre_ParAMGDataSmoothOption(amg_data); */
if (add_last_lvl == -1 ) add_end = num_levels;
else add_end = add_last_lvl;
num_rows_L = 0;
for (i=addlvl; i < add_end; i++)
{
A_tmp = A_array[i];
A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
num_rows_L += num_rows_tmp;
}
Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
Rtilde_local = hypre_SeqVectorCreate(num_rows_L);
hypre_SeqVectorInitialize(Rtilde_local);
hypre_ParVectorLocalVector(Rtilde) = Rtilde_local;
hypre_ParVectorOwnsData(Rtilde) = 1;
Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
Xtilde_local = hypre_SeqVectorCreate(num_rows_L);
hypre_SeqVectorInitialize(Xtilde_local);
hypre_ParVectorLocalVector(Xtilde) = Xtilde_local;
hypre_ParVectorOwnsData(Xtilde) = 1;
x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
D_inv = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST);
l1_start = 0;
for (level=addlvl; level < add_end; level++)
{
if (level != 0)
{
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level]));
if (tmp_data)
{
hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(F_array[level])));
}
hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[l1_start];
hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0;
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level]));
if (tmp_data)
{
hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(U_array[level])));
}
hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[l1_start];
hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0;
}
A_tmp = A_array[level];
A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
if (add_rlx == 0)
{
/*HYPRE_Real rlx_wt = relax_weight[level];*/
HYPRE_Int *A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
HYPRE_Real *A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_rows_tmp; i++)
{
D_inv[l1_start+i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]];
}
}
else
{
l1_norms = l1_norms_ptr[level];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_rows_tmp; i++)
{
D_inv[l1_start+i] = 1.0 / hypre_VectorData(l1_norms)[i];
}
}
l1_start += num_rows_tmp;
}
hypre_ParAMGDataDinv(amg_data) = D_inv;
hypre_ParAMGDataRtilde(amg_data) = Rtilde;
hypre_ParAMGDataXtilde(amg_data) = Xtilde;
return Solve_err_flag;
}
|
urandom.c | /****************************************************************************/
/* */
/* This file is part of CONCORDE */
/* */
/* (c) Copyright 1995--1999 by David Applegate, Robert Bixby, */
/* Vasek Chvatal, and William Cook */
/* */
/* Permission is granted for academic research use. For other uses, */
/* contact the authors for licensing options. */
/* */
/* Use at your own risk. We make no guarantees about the */
/* correctness or usefulness of this code. */
/* */
/****************************************************************************/
/****************************************************************************/
/* */
/* MACHINE INDEPENDENT RANDOM NUMBER GENERATOR */
/* */
/* TSP CODE */
/* */
/* */
/* Written by: DIMACS (modified for TSP) */
/* Date: February 7, 1995 (cofeb16) */
/* September 18, 2001 (billenium fix) */
/* */
/* EXPORTED FUNCTIONS: */
/* */
/* void CCutil_sprand (int seed, CCrandstate *r) */
/* - Call once to initialize the generator. */
/* */
/* int CCutil_lprand (CCrandstate *r) */
/* - Returns an integer in the range 0 to CC_PRANDMAX - 1. */
/* */
/* double CCutil_normrand (CCrandstate *r) */
/* - Returns a normally-distributed random value with mean 0 and */
/* standard deviation 1. */
/* */
/****************************************************************************/
/****************************************************************************/
/* */
/* NOTES (from DIMACS): */
/* This file contains a set of c-language functions for generating */
/* uniform integers. This is a COMPLETELY PORTABLE generator. It will */
/* give IDENTICAL sequences of random numbers for any architecture with */
/* at least 30-bit integers, regardless of the integer representation, */
/* INT_MAX value, or roundoff/truncation method, etc. */
/* This Truly Remarkable RNG is described more fully in */
/* J. Bentley's column, ``The Software Exploratorium''. It is based on */
/* one in Knuth, Vol 2, Section 3.2.2 (Algorithm A). */
/* */
/* CCutil_normrand is not from DIMACS or Bentley, but rather just uses */
/* the Box-Muller transformation to generate a normally-distributed */
/* random variable from two uniform ones. */
/* */
/****************************************************************************/
#include "machdefs.h"
#include "util.h"
void CCutil_sprand (int seed, CCrandstate *r)
{
int i, ii;
int last, next;
int *arr = r->arr;
seed %= CC_PRANDMAX;
if (seed < 0) seed += CC_PRANDMAX;
arr[0] = last = seed;
next = 1;
for (i = 1; i < 55; i++) {
ii = (21 * i) % 55;
arr[ii] = next;
next = last - next;
if (next < 0)
next += CC_PRANDMAX;
last = arr[ii];
}
r->a = 0;
r->b = 24;
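    /* Warm up: discard the first 165 values so the state is well mixed. */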
for (i = 0; i < 165; i++)
last = CCutil_lprand (r);
}
int CCutil_lprand (CCrandstate *r)
{
int t;
#ifdef _OPENMP
#pragma omp critical
{
#endif
if (r->a-- == 0)
r->a = 54;
if (r->b-- == 0)
r->b = 54;
t = r->arr[r->a] - r->arr[r->b];
if (t < 0)
t += CC_PRANDMAX;
r->arr[r->a] = t;
#ifdef _OPENMP
}
#endif
return t;
}
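#ifdef URANDOM_RECURRENCE_SKETCH
/*-----------------------------------------------*/
/* Illustrative sketch, not part of the original */
/* CONCORDE source: the ring-buffer update in */
/* CCutil_lprand realizes the lagged Fibonacci */
/* recurrence */
/* x[n] = (x[n-55] - x[n-24]) mod CC_PRANDMAX */
/* from Knuth, Vol 2, Section 3.2.2 (Algorithm */
/* A). The hypothetical stand-alone version */
/* below makes the recurrence explicit. */
/*-----------------------------------------------*/
static int sketch_lprand (int *x, int *n)
{
    int t;
    /* x[] holds the last 55 values; *n counts how many have been drawn. */
    /* x[n-55] lives at slot n % 55, x[n-24] at slot (n + 31) % 55. */
    t = x[*n % 55] - x[(*n + 31) % 55];
    if (t < 0)
        t += CC_PRANDMAX;
    x[*n % 55] = t;
    (*n)++;
    return t;
}
#endif /* URANDOM_RECURRENCE_SKETCH */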
#ifdef TRY_CODE
/*-----------------------------------------------*/
/* This is a little driver program so you can */
/* test the code. */
/* Typing: a.out 0 3 1 */
/* should produce */
/* 921674862 */
/* 250065336 */
/* 377506581 */
/* Typing: a.out 1000000 1 2 */
/* should produce */
/* 57265995 */
/*-----------------------------------------------*/
int main (int ac, char **av)
{
int i;
int j;
int n;
int m;
int seed;
CCrandstate rstate;
if (ac < 4) {
fprintf (stderr, "Usage: %s #discard #print #seed\n", av[0]);
return 0;
}
m = atoi (av[1]); /* Number to discard initially */
n = atoi (av[2]); /* Number to print */
seed = atoi (av[3]); /* Seed */
CCutil_sprand (seed, &rstate);
for (i = 0; i < m; i++)
j = CCutil_lprand (&rstate);
for (i = 0; i < n; i++)
printf ("%ld\n", CCutil_lprand (&rstate));
return 0;
}
#endif /* TRY_CODE */
double CCutil_normrand (CCrandstate *r)
{
/* Box-Muller transform; shift x1 into (0,1] so that log(x1) is finite */
double x1 = (((double) CCutil_lprand(r)) + 1.0) / ((double) CC_PRANDMAX);
double x2 = ((double) CCutil_lprand(r)) / ((double) CC_PRANDMAX);
return sqrt (-2*log(x1)) * cos(2*M_PI*x2);
}
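#ifdef URANDOM_USAGE_SKETCH
/*-----------------------------------------------*/
/* Minimal usage sketch (illustrative, not part */
/* of the original file): seed once with */
/* CCutil_sprand, then draw uniform integers in */
/* [0, CC_PRANDMAX) and approximately N(0,1) */
/* variates. */
/*-----------------------------------------------*/
#include <stdio.h>
static void sketch_usage (void)
{
    CCrandstate rstate;
    int i;
    CCutil_sprand (99, &rstate);      /* initialize the generator state */
    for (i = 0; i < 3; i++)
        printf ("uniform: %d\n", CCutil_lprand (&rstate));
    printf ("normal:  %f\n", CCutil_normrand (&rstate));
}
#endif /* URANDOM_USAGE_SKETCH */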
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods take a 24-bit image and reduce
% the number of colors so it can be displayed on a raster device with
% fewer bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image. Assignment defines the output image's color map and sets
% each pixel's color by reclassification in the reduced tree. Our goal
% is to minimize the numerical discrepancies between the original colors
% and the quantized colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% color components in the input image are quantized to k-bit precision,
% so that Cmax = 2^k-1, the tree would need k levels below the root node
% to allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the node's center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of pixels whose
% colors lie within the cubic volume which the node represents. This
% includes n1 - n2 pixels whose colors should be defined by nodes at a
% lower level in the tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
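#ifdef QUANTIZE_OCTREE_SKETCH
/*
  Illustrative sketch, not the MagickCore implementation: descending the
  color description tree picks, at each level, the child cube whose index
  packs one bit of red, green, and blue (bit "index", counted from the
  most significant end). Compare ColorToNodeId() below, which adds an
  optional alpha bit. The pruning loop mirrors the Ep threshold
  pseudo-code above; node_error[], node_alive[], and node_count are
  hypothetical arrays standing in for the real tree traversal, and the
  upward merging of color statistics is omitted.
*/
static unsigned int SketchChildId(const unsigned char red,
  const unsigned char green,const unsigned char blue,const unsigned int index)
{
  return(((red >> index) & 0x01) | (((green >> index) & 0x01) << 1) |
    (((blue >> index) & 0x01) << 2));
}

static void SketchPrune(double *node_error,int *node_alive,
  const unsigned int node_count,const unsigned int max_colors)
{
  double
    Ep = 0.0;

  unsigned int
    alive = node_count;

  while (alive > max_colors)
  {
    double
      next_Ep = -1.0;

    unsigned int
      i;

    for (i=0; i < node_count; i++)
    {
      if (node_alive[i] == 0)
        continue;
      if (node_error[i] <= Ep)
        {
          node_alive[i]=0;  /* prune all nodes with E <= Ep */
          alive--;
        }
      else
        if ((next_Ep < 0.0) || (node_error[i] < next_Ep))
          next_Ep=node_error[i];  /* minimum E among survivors */
    }
    Ep=next_Ep;  /* raise the threshold and repeat */
  }
}
#endif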
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typedef declarations.
*/
typedef struct _DoublePixelPacket
{
double
red,
green,
blue,
alpha;
} DoublePixelPacket;
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
MagickSizeType
number_unique;
DoublePixelPacket
total_color;
double
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
typedef struct _CubeInfo
{
NodeInfo
*root;
size_t
colors,
maximum_colors;
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
DoublePixelPacket
target;
double
distance,
pruning_threshold,
next_threshold;
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
MemoryInfo
*memory_info;
ssize_t
*cache;
DoublePixelPacket
error[ErrorQueueLength];
double
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha;
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
QuantizeInfo
*quantize_info;
quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
GetQuantizeInfo(quantize_info);
if (image_info != (ImageInfo *) NULL)
{
const char
*option;
quantize_info->dither_method=image_info->dither == MagickFalse ?
NoDitherMethod : RiemersmaDitherMethod;
option=GetImageOption(image_info,"dither");
if (option != (const char *) NULL)
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,option);
quantize_info->measure_error=image_info->verbose;
}
return(quantize_info);
}
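#ifdef QUANTIZE_USAGE_SKETCH
/*
  Minimal usage sketch (illustrative, not part of the original file):
  acquire a QuantizeInfo, request a dithered 256-color reduction, and
  release the structure. The image and exception are assumed to come
  from the caller.
*/
static MagickBooleanType SketchReduceTo256(Image *image,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=256;
  quantize_info->dither_method=FloydSteinbergDitherMethod;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
#endif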
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void AssociateAlphaPixel(const Image *image,
const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
double
alpha;
if ((cube_info->associate_alpha == MagickFalse) ||
(GetPixelAlpha(image,pixel) == OpaqueAlpha))
{
alpha_pixel->red=(double) GetPixelRed(image,pixel);
alpha_pixel->green=(double) GetPixelGreen(image,pixel);
alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
return;
}
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
alpha_pixel->red=alpha*GetPixelRed(image,pixel);
alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
double
alpha;
if ((cube_info->associate_alpha == MagickFalse) ||
(pixel->alpha == OpaqueAlpha))
{
alpha_pixel->red=(double) pixel->red;
alpha_pixel->green=(double) pixel->green;
alpha_pixel->blue=(double) pixel->blue;
alpha_pixel->alpha=(double) pixel->alpha;
return;
}
alpha=(double) (QuantumScale*pixel->alpha);
alpha_pixel->red=alpha*pixel->red;
alpha_pixel->green=alpha*pixel->green;
alpha_pixel->blue=alpha*pixel->blue;
alpha_pixel->alpha=(double) pixel->alpha;
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
const DoublePixelPacket *pixel,size_t index)
{
size_t
id;
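  /*
    Pack one bit per channel at the given depth: bit "index" of red,
    green, and blue (and alpha, when associated) selects among the 8 or
    16 child cubes.
  */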
id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) |
((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 |
((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2);
if (cube_info->associate_alpha != MagickFalse)
id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3;
return(id);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
/*
Allocate image colormap.
*/
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
exception);
if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
image->colors=0;
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
(void) DefineImageColormap(image,cube_info,cube_info->root);
/*
Create a reduced color image.
*/
if (cube_info->quantize_info->dither_method != NoDitherMethod)
(void) DitherImage(image,cube_info,exception);
else
{
CacheView
*image_view;
MagickBooleanType
status;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
register const NodeInfo
*node_info;
register ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
for (count=1; (x+count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(
image->colormap[index].red),q);
SetPixelGreen(image,ClampToQuantum(
image->colormap[index].green),q);
SetPixelBlue(image,ClampToQuantum(
image->colormap[index].blue),q);
if (cube.associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(
image->colormap[index].alpha),q);
}
q+=GetPixelChannels(image);
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image,exception);
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
{
double
intensity;
/*
Monochrome image.
*/
intensity=0.0;
if ((image->colors > 1) &&
(GetPixelInfoLuma(image->colormap+0) >
GetPixelInfoLuma(image->colormap+1)))
intensity=(double) QuantumRange;
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image,exception);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace,exception);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax = 2^k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the node's center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
MagickBooleanType
associate_alpha;
associate_alpha=image->alpha_trait == BlendPixelTrait ? MagickTrue :
MagickFalse;
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
associate_alpha=MagickFalse;
cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
CacheView
*image_view;
DoublePixelPacket
error,
mid,
midpoint,
pixel;
MagickBooleanType
proceed;
double
bisect;
NodeInfo
*node_info;
size_t
count,
id,
index,
level;
ssize_t
y;
/*
Classify the first cube_info->maximum_colors colors to a tree depth of 8.
*/
SetAssociatedAlpha(image,cube_info);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,
cube_info->quantize_info->colorspace,exception);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
midpoint.red=(double) QuantumRange/2.0;
midpoint.green=(double) QuantumRange/2.0;
midpoint.blue=(double) QuantumRange/2.0;
midpoint.alpha=(double) QuantumRange/2.0;
error.alpha=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((double) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= MaxTreeDepth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
continue;
}
if (level == MaxTreeDepth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.alpha*error.alpha);
if (IsNaN(distance))
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel(pixel.alpha);
else
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel((MagickRealType) OpaqueAlpha);
p+=count*GetPixelChannels(image);
}
if (cube_info->colors > cube_info->maximum_colors)
{
PruneToCubeDepth(cube_info,cube_info->root);
break;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
for (y++; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
{
PixelInfo
packet;
GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
break;
}
AssociateAlphaPixel(image,cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((double) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= cube_info->depth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
image->filename);
continue;
}
if (level == cube_info->depth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.alpha*error.alpha);
if (IsNaN(distance) != MagickFalse)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel(pixel.alpha);
else
node_info->total_color.alpha+=count*QuantumScale*
ClampPixel((MagickRealType) OpaqueAlpha);
p+=count*GetPixelChannels(image);
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if quantize info is NULL, a new one.
%
% o quantize_info: a structure of type QuantizeInfo.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
QuantizeInfo
*clone_info;
clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetQuantizeInfo(clone_info);
if (quantize_info == (QuantizeInfo *) NULL)
return(clone_info);
clone_info->number_colors=quantize_info->number_colors;
clone_info->tree_depth=quantize_info->tree_depth;
clone_info->dither_method=quantize_info->dither_method;
clone_info->colorspace=quantize_info->colorspace;
clone_info->measure_error=quantize_info->measure_error;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
ClosestColor(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
double
pixel;
register double
alpha,
beta,
distance;
register DoublePixelPacket
*magick_restrict q;
register PixelInfo
*magick_restrict p;
/*
Determine if this color is "closest".
*/
p=image->colormap+node_info->color_number;
q=(&cube_info->target);
alpha=1.0;
beta=1.0;
if (cube_info->associate_alpha != MagickFalse)
{
alpha=(double) (QuantumScale*p->alpha);
beta=(double) (QuantumScale*q->alpha);
}
pixel=alpha*p->red-beta*q->red;
distance=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*p->green-beta*q->green;
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*p->blue-beta*q->blue;
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
if (cube_info->associate_alpha != MagickFalse)
{
pixel=p->alpha-q->alpha;
distance+=pixel*pixel;
}
if (distance <= cube_info->distance)
{
cube_info->distance=distance;
cube_info->color_number=node_info->color_number;
}
}
}
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
ExceptionInfo *exception)
{
QuantizeInfo
quantize_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsPaletteImage(image) == MagickFalse)
return(MagickFalse);
GetQuantizeInfo(&quantize_info);
quantize_info.number_colors=image->colors;
quantize_info.tree_depth=MaxTreeDepth;
return(QuantizeImage(&quantize_info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero. DefineImageColormap() returns the
% number of colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
(void) DefineImageColormap(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
register double
alpha;
register PixelInfo
*magick_restrict q;
/*
Colormap entry is defined by the mean color in this cube.
*/
q=image->colormap+image->colors;
alpha=(double) ((MagickOffsetType) node_info->number_unique);
alpha=PerceptibleReciprocal(alpha);
if (cube_info->associate_alpha == MagickFalse)
{
q->red=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.red);
q->green=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.green);
q->blue=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.blue);
q->alpha=(double) OpaqueAlpha;
}
else
{
double
opacity;
opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
q->alpha=(double) ClampToQuantum(opacity);
if (q->alpha == OpaqueAlpha)
{
q->red=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.red);
q->green=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.green);
q->blue=(double) ClampToQuantum(alpha*QuantumRange*
node_info->total_color.blue);
}
else
{
double
gamma;
gamma=(double) (QuantumScale*q->alpha);
gamma=PerceptibleReciprocal(gamma);
q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
node_info->total_color.red);
q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
node_info->total_color.green);
q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
node_info->total_color.blue);
if (node_info->number_unique > cube_info->transparent_pixels)
{
cube_info->transparent_pixels=node_info->number_unique;
cube_info->transparent_index=(ssize_t) image->colors;
}
}
}
node_info->color_number=image->colors++;
}
return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a CubeInfo structure.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
register Nodes
*nodes;
/*
Release color cube tree storage.
*/
do
{
nodes=cube_info->node_queue->next;
cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
cube_info->node_queue->nodes);
cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
cube_info->node_queue);
cube_info->node_queue=nodes;
} while (cube_info->node_queue != (Nodes *) NULL);
if (cube_info->memory_info != (MemoryInfo *) NULL)
cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
quantize_info->signature=(~MagickCoreSignature);
quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered, otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
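/*
Illustrative sketch (not part of the build): the serpentine Floyd-Steinberg
kernel used by FloydSteinbergDither() below distributes 7/16 of the error to
the next pixel in scan order and 3/16, 5/16, and 1/16 to the three neighbors
on the following row. The helper names here are hypothetical, and the sketch
runs in plain raster order on an 8-bit grayscale buffer; the real code also
reverses direction on odd rows and scales by the dither:diffusion-amount
artifact.
*/
#if 0
static unsigned char ExampleClampByte(int value)
{
return((unsigned char) (value < 0 ? 0 : value > 255 ? 255 : value));
}
static void ExampleFloydSteinbergGray(unsigned char *pixels,size_t width,
size_t height)
{
size_t
x,
y;
for (y=0; y < height; y++)
for (x=0; x < width; x++)
{
int
error,
new_value,
old_value;
old_value=(int) pixels[y*width+x];
new_value=old_value < 128 ? 0 : 255;  /* threshold to black or white */
error=old_value-new_value;
pixels[y*width+x]=(unsigned char) new_value;
if ((x+1) < width)
pixels[y*width+x+1]=ExampleClampByte((int) pixels[y*width+x+1]+
(7*error)/16);  /* east */
if ((y+1) < height)
{
if (x > 0)
pixels[(y+1)*width+x-1]=ExampleClampByte((int)
pixels[(y+1)*width+x-1]+(3*error)/16);  /* south-west */
pixels[(y+1)*width+x]=ExampleClampByte((int) pixels[(y+1)*width+x]+
(5*error)/16);  /* south */
if ((x+1) < width)
pixels[(y+1)*width+x+1]=ExampleClampByte((int)
pixels[(y+1)*width+x+1]+error/16);  /* south-east */
}
}
}
#endif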
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
register ssize_t
i;
assert(pixels != (DoublePixelPacket **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (DoublePixelPacket *) NULL)
pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
ssize_t
offset;
offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
if (cube_info->associate_alpha != MagickFalse)
offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
return(offset);
}
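/*
Worked example (illustrative): each channel is reduced to its top
(8-CacheShift) bits and the fields are packed side by side, so with
CacheShift == 3 (5 significant bits per channel) a pixel whose 8-bit
channels are (255,0,128) maps to offset (255>>3) | ((0>>3)<<5) |
((128>>3)<<10), i.e. 31 | 0 | 16384 == 16415. The alpha field is only
packed in when associate_alpha is set.
*/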
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"
CacheView
*image_view;
const char
*artifact;
double
amount;
DoublePixelPacket
**pixels;
MagickBooleanType
status;
ssize_t
y;
/*
Distribute quantization error using Floyd-Steinberg.
*/
pixels=AcquirePixelThreadSet(image->columns);
if (pixels == (DoublePixelPacket **) NULL)
return(MagickFalse);
status=MagickTrue;
amount=1.0;
artifact=GetImageArtifact(image,"dither:diffusion-amount");
if (artifact != (const char *) NULL)
amount=StringToDoubleInterval(artifact,1.0);
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
CubeInfo
cube;
DoublePixelPacket
*current,
*previous;
register Quantum
*magick_restrict q;
register ssize_t
x;
size_t
index;
ssize_t
v;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
cube=(*cube_info);
current=pixels[id]+(y & 0x01)*image->columns;
previous=pixels[id]+((y+1) & 0x01)*image->columns;
v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
for (x=0; x < (ssize_t) image->columns; x++)
{
DoublePixelPacket
color,
pixel;
register ssize_t
i;
ssize_t
u;
u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
if (x > 0)
{
pixel.red+=7.0*amount*current[u-v].red/16;
pixel.green+=7.0*amount*current[u-v].green/16;
pixel.blue+=7.0*amount*current[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=7.0*amount*current[u-v].alpha/16;
}
if (y > 0)
{
if (x < (ssize_t) (image->columns-1))
{
pixel.red+=previous[u+v].red/16;
pixel.green+=previous[u+v].green/16;
pixel.blue+=previous[u+v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=previous[u+v].alpha/16;
}
pixel.red+=5.0*amount*previous[u].red/16;
pixel.green+=5.0*amount*previous[u].green/16;
pixel.blue+=5.0*amount*previous[u].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=5.0*amount*previous[u].alpha/16;
if (x > 0)
{
pixel.red+=3.0*amount*previous[u-v].red/16;
pixel.green+=3.0*amount*previous[u-v].green/16;
pixel.blue+=3.0*amount*previous[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.alpha+=3.0*amount*previous[u-v].alpha/16;
}
}
pixel.red=(double) ClampPixel(pixel.red);
pixel.green=(double) ClampPixel(pixel.green);
pixel.blue=(double) ClampPixel(pixel.blue);
if (cube.associate_alpha != MagickFalse)
pixel.alpha=(double) ClampPixel(pixel.alpha);
i=CacheOffset(&cube,&pixel);
if (cube.cache[i] < 0)
{
register NodeInfo
*node_info;
register size_t
node_id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
node_id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[node_id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[node_id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
1.0);
ClosestColor(image,&cube,node_info->parent);
cube.cache[i]=(ssize_t) cube.color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) cube.cache[i];
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
q+u*GetPixelChannels(image));
SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
q+u*GetPixelChannels(image));
SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
q+u*GetPixelChannels(image));
if (cube.associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
q+u*GetPixelChannels(image));
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
/*
Store the error.
*/
AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
current[u].red=pixel.red-color.red;
current[u].green=pixel.green-color.green;
current[u].blue=pixel.blue-color.blue;
if (cube.associate_alpha != MagickFalse)
current[u].alpha=pixel.alpha-color.alpha;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
image_view=DestroyCacheView(image_view);
pixels=DestroyPixelThreadSet(pixels);
return(status);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
ExceptionInfo *);
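/*
Riemersma() recursively walks a Hilbert curve over the image: at the base
level it emits three unit steps in the given direction, and at higher levels
it stitches together four rotated sub-curves of level-1 joined by single
steps. RiemersmaDither() quantizes one pixel per step, diffusing the
quantization error along the curve through the weighted error queue.
*/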
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
if (level == 1)
switch (direction)
{
case WestGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
break;
}
case EastGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
break;
}
case NorthGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
break;
}
case SouthGravity:
{
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
break;
}
default:
break;
}
else
switch (direction)
{
case WestGravity:
{
Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
break;
}
case EastGravity:
{
Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
break;
}
case NorthGravity:
{
Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,EastGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,NorthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
break;
}
case SouthGravity:
{
Riemersma(image,image_view,cube_info,level-1,EastGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,WestGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,SouthGravity,
exception);
(void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
exception);
Riemersma(image,image_view,cube_info,level-1,WestGravity,
exception);
break;
}
default:
break;
}
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"
DoublePixelPacket
color,
pixel;
MagickBooleanType
proceed;
register CubeInfo
*p;
size_t
index;
p=cube_info;
if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
(p->y >= 0) && (p->y < (ssize_t) image->rows))
{
register Quantum
*magick_restrict q;
register ssize_t
i;
/*
Distribute error.
*/
q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
if (q == (Quantum *) NULL)
return(MagickFalse);
AssociateAlphaPixel(image,cube_info,q,&pixel);
for (i=0; i < ErrorQueueLength; i++)
{
pixel.red+=p->weights[i]*p->error[i].red;
pixel.green+=p->weights[i]*p->error[i].green;
pixel.blue+=p->weights[i]*p->error[i].blue;
if (cube_info->associate_alpha != MagickFalse)
pixel.alpha+=p->weights[i]*p->error[i].alpha;
}
pixel.red=(double) ClampPixel(pixel.red);
pixel.green=(double) ClampPixel(pixel.green);
pixel.blue=(double) ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
pixel.alpha=(double) ClampPixel(pixel.alpha);
i=CacheOffset(cube_info,&pixel);
if (p->cache[i] < 0)
{
register NodeInfo
*node_info;
register size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=p->root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(cube_info,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
p->target=pixel;
p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
QuantumRange+1.0)+1.0);
ClosestColor(image,p,node_info->parent);
p->cache[i]=(ssize_t) p->color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) p->cache[i];
if (image->storage_class == PseudoClass)
SetPixelIndex(image,(Quantum) index,q);
if (cube_info->quantize_info->measure_error == MagickFalse)
{
SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
if (cube_info->associate_alpha != MagickFalse)
SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
return(MagickFalse);
/*
Propagate the error as the last entry of the error queue.
*/
(void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
sizeof(p->error[0]));
AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
p->error[ErrorQueueLength-1].red=pixel.red-color.red;
p->error[ErrorQueueLength-1].green=pixel.green-color.green;
p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
if (cube_info->associate_alpha != MagickFalse)
p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
if (proceed == MagickFalse)
return(MagickFalse);
p->offset++;
}
switch (direction)
{
case WestGravity: p->x--; break;
case EastGravity: p->x++; break;
case NorthGravity: p->y--; break;
case SouthGravity: p->y++; break;
}
return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
depth;
if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
return(FloydSteinbergDither(image,cube_info,exception));
/*
Distribute quantization error along a Hilbert curve.
*/
(void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
cube_info->x=0;
cube_info->y=0;
i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
for (depth=1; i != 0; depth++)
i>>=1;
if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows))
depth++;
cube_info->offset=0;
cube_info->span=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,exception);
if (depth > 1)
Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a small number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a newly allocated
% node in the color cube tree.
%
% o cube_info: A pointer to the Cube structure.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the color cube tree at which the node
% resides.
%
% o parent: A pointer to the parent node in the color cube tree.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
const size_t level,NodeInfo *parent)
{
NodeInfo
*node_info;
if (cube_info->free_nodes == 0)
{
Nodes
*nodes;
/*
Allocate a new queue of nodes.
*/
nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
if (nodes == (Nodes *) NULL)
return((NodeInfo *) NULL);
nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
sizeof(*nodes->nodes));
if (nodes->nodes == (NodeInfo *) NULL)
return((NodeInfo *) NULL);
nodes->next=cube_info->node_queue;
cube_info->node_queue=nodes;
cube_info->next_node=nodes->nodes;
cube_info->free_nodes=NodesInAList;
}
cube_info->nodes++;
cube_info->free_nodes--;
node_info=cube_info->next_node++;
(void) memset(node_info,0,sizeof(*node_info));
node_info->parent=parent;
node_info->id=id;
node_info->level=level;
return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
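/*
Usage sketch (illustrative, not part of the build): after quantizing with
measure_error enabled, the three metrics described above are available in
image->error. The helper name is hypothetical.
*/
#if 0
static void ExampleReportQuantizeError(Image *image,ExceptionInfo *exception)
{
if (GetImageQuantizeError(image,exception) == MagickFalse)
return;
(void) fprintf(stdout,"mean error per pixel: %g\n",
image->error.mean_error_per_pixel);
(void) fprintf(stdout,"normalized mean squared error: %g\n",
image->error.normalized_mean_error);
(void) fprintf(stdout,"normalized maximum squared error: %g\n",
image->error.normalized_maximum_error);
}
#endif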
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
double
alpha,
area,
beta,
distance,
maximum_error,
mean_error,
mean_error_per_pixel;
ssize_t
index,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
(void) memset(&image->error,0,sizeof(image->error));
if (image->storage_class == DirectClass)
return(MagickTrue);
alpha=1.0;
beta=1.0;
area=3.0*image->columns*image->rows;
maximum_error=0.0;
mean_error_per_pixel=0.0;
mean_error=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(ssize_t) GetPixelIndex(image,p);
if (image->alpha_trait == BlendPixelTrait)
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
beta=(double) (QuantumScale*image->colormap[index].alpha);
}
distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
image->colormap[index].red));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
image->colormap[index].green));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
image->colormap[index].blue));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
mean_error/area;
image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
(void) memset(quantize_info,0,sizeof(*quantize_info));
quantize_info->number_colors=256;
quantize_info->dither_method=RiemersmaDitherMethod;
quantize_info->colorspace=UndefinedColorspace;
quantize_info->measure_error=MagickFalse;
quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
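/*
Worked example (illustrative): the PosterizePixel() macro below maps each
channel to the nearest of `levels' evenly spaced values. With levels == 4
and an 8-bit quantum, round(p/255*3) selects one of {0,1,2,3}, so every
channel value collapses to one of {0, 85, 170, 255}; e.g. p == 100 gives
round(1.18) == 1 and therefore 85.
*/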
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Posterize colormap.
*/
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double)
PosterizePixel(image->colormap[i].red);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double)
PosterizePixel(image->colormap[i].green);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double)
PosterizePixel(image->colormap[i].blue);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double)
PosterizePixel(image->colormap[i].alpha);
}
/*
Posterize image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait == BlendPixelTrait))
SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
levels,MaxColormapSize+1);
quantize_info->dither_method=dither_method;
quantize_info->tree_depth=MaxTreeDepth;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
NodeInfo
*parent;
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneChild(cube_info,node_info->child[i]);
if (cube_info->nodes > cube_info->maximum_colors)
{
/*
Merge color statistics into parent.
*/
parent=node_info->parent;
parent->number_unique+=node_info->number_unique;
parent->total_color.red+=node_info->total_color.red;
parent->total_color.green+=node_info->total_color.green;
parent->total_color.blue+=node_info->total_color.blue;
parent->total_color.alpha+=node_info->total_color.alpha;
parent->child[node_info->id]=(NodeInfo *) NULL;
cube_info->nodes--;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree,
% merging their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneLevel(cube_info,node_info->child[i]);
if (node_info->level == cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneToCubeDepth(cube_info,node_info->child[i]);
if (node_info->level > cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
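/*
Usage sketch (illustrative, not part of the build): reduce an image to a
16-color palette with Floyd-Steinberg dithering. The helper name is
hypothetical.
*/
#if 0
static MagickBooleanType ExampleQuantizeTo16(Image *image,
ExceptionInfo *exception)
{
MagickBooleanType
status;
QuantizeInfo
*quantize_info;
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->number_colors=16;
quantize_info->dither_method=FloydSteinbergDitherMethod;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
return(status);
}
#endif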
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
Image *image,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
size_t
depth,
maximum_colors;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
if (image->alpha_trait != BlendPixelTrait)
{
if (SetImageGray(image,exception) != MagickFalse)
(void) SetGrayscaleImage(image,exception);
}
if ((quantize_info->dither_method == NoDitherMethod) &&
(image->storage_class == PseudoClass) &&
(image->colors <= maximum_colors))
{
if ((quantize_info->colorspace != UndefinedColorspace) &&
(quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace(image,quantize_info->colorspace,
exception);
return(MagickTrue);
}
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
depth--;
if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
depth--;
if (SetImageGray(image,exception) != MagickFalse)
depth=MaxTreeDepth;
}
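/*
Worked example (illustrative): for maximum_colors == 256 the loop above
yields depth == 6 (Log4(256)+2); with dithering enabled it becomes 5, and
the alpha adjustment then does not apply since depth is no longer greater
than 5 (a grayscale image overrides all of this with MaxTreeDepth).
*/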
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,image,exception);
if (status != MagickFalse)
{
/*
Reduce the number of colors in the image.
*/
if (cube_info->colors > cube_info->maximum_colors)
ReduceImageColors(image,cube_info);
status=AssignImageColors(image,cube_info,exception);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
Image *images,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
proceed,
status;
MagickProgressMonitor
progress_monitor;
register ssize_t
i;
size_t
depth,
maximum_colors,
number_images;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (GetNextImageInList(images) == (Image *) NULL)
{
/*
Handle a single image with QuantizeImage.
*/
status=QuantizeImage(quantize_info,images,exception);
return(status);
}
status=MagickFalse;
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if (quantize_info->dither_method != NoDitherMethod)
depth--;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return(MagickFalse);
}
number_images=GetImageListLength(images);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
image->client_data);
status=ClassifyImageColors(cube_info,image,exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
if (status != MagickFalse)
{
/*
Reduce the number of colors in an image sequence.
*/
ReduceImageColors(images,cube_info);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
NULL,image->client_data);
status=AssignImageColors(image,cube_info,exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,
image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the
% quantization error into a 1D array, which the caller then sorts. This
% accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to the current node in the color cube tree.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
register ssize_t
i;
size_t
n,
number_children;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
register ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
Reduce(cube_info,node_info->child[i]);
if (node_info->quantize_error <= cube_info->pruning_threshold)
PruneChild(cube_info,node_info);
else
{
/*
Find minimum pruning threshold.
*/
if (node_info->number_unique > 0)
cube_info->colors++;
if (node_info->quantize_error < cube_info->next_threshold)
cube_info->next_threshold=node_info->quantize_error;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixels' colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
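/*
Worked example (illustrative) of the pruning loop described above: suppose
the tree holds five colored nodes with quantization errors {0, 1, 3, 7, 9}
and maximum_colors is 2. Pass one prunes E <= 0 (four nodes remain, next
Ep = 1); pass two prunes E <= 1 (three remain, Ep = 3); pass three prunes
E <= 3 (two remain) and the loop terminates.
*/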
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
double
*p,
*q;
p=(double *) error_p;
q=(double *) error_q;
if (*p > *q)
return(1);
if (fabs(*q-*p) <= MagickEpsilon)
return(0);
return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
MagickBooleanType
proceed;
MagickOffsetType
offset;
size_t
span;
cube_info->next_threshold=0.0;
if (cube_info->colors > cube_info->maximum_colors)
{
double
*quantize_error;
/*
Enable rapid reduction of the number of unique colors.
*/
quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
sizeof(*quantize_error));
if (quantize_error != (double *) NULL)
{
(void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
quantize_error);
qsort(quantize_error,cube_info->nodes,sizeof(double),
QuantizeErrorCompare);
if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
cube_info->next_threshold=quantize_error[cube_info->nodes-110*
(cube_info->maximum_colors+1)/100];
quantize_error=(double *) RelinquishMagickMemory(quantize_error);
}
}
for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
{
cube_info->pruning_threshold=cube_info->next_threshold;
cube_info->next_threshold=cube_info->root->quantize_error-1;
cube_info->colors=0;
Reduce(cube_info,cube_info->root);
offset=(MagickOffsetType) span-cube_info->colors;
proceed=SetImageProgress(image,ReduceImageTag,offset,span-
cube_info->maximum_colors+1);
if (proceed == MagickFalse)
break;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
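/*
Usage sketch (illustrative, not part of the build): remap an image to the
colors of a reference palette image. The helper name is hypothetical.
*/
#if 0
static MagickBooleanType ExampleRemapToPalette(Image *image,
const Image *palette_image,ExceptionInfo *exception)
{
MagickBooleanType
status;
QuantizeInfo
*quantize_info;
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->dither_method=RiemersmaDitherMethod;
status=RemapImage(quantize_info,image,palette_image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
return(status);
}
#endif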
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
Image *image,const Image *remap_image,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
/*
Initialize color cube.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(remap_image != (Image *) NULL);
assert(remap_image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,exception);
if (status != MagickFalse)
{
/*
Assign the image colors from the reference image.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
status=AssignImageColors(image,cube_info,exception);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
Image *images,const Image *remap_image,ExceptionInfo *exception)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
status;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=images;
if (remap_image == (Image *) NULL)
{
/*
Create a global colormap for an image sequence.
*/
status=QuantizeImages(quantize_info,images,exception);
return(status);
}
/*
Classify image colors from the reference image.
*/
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,exception);
if (status != MagickFalse)
{
/*
Assign each image in the sequence the closest colors from the reference image.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
{
status=AssignImageColors(image,cube_info,exception);
if (status == MagickFalse)
break;
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
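/*
Outline (descriptive): SetGrayscaleImage() first builds a colormap of the
unique gray intensities present in the image (one pass over the pixels,
guarded by an OpenMP critical section), then sorts the colormap by
intensity, collapses duplicate entries, and finally rewrites each pixel
index against the compacted map.
*/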
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
double
intensity;
PixelInfo
*color_1,
*color_2;
color_1=(PixelInfo *) x;
color_2=(PixelInfo *) y;
intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
GetPixelInfoIntensity((const Image *) NULL,color_2);
if (intensity < (double) INT_MIN)
intensity=(double) INT_MIN;
if (intensity > (double) INT_MAX)
intensity=(double) INT_MAX;
return((int) intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
*colormap;
register ssize_t
i;
size_t
extent;
ssize_t
*colormap_index,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace,exception);
extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
(void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(image,q));
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=(double)
GetPixelRed(image,q);
image->colormap[image->colors].green=(double)
GetPixelGreen(image,q);
image->colormap[image->colors].blue=(double)
GetPixelBlue(image,q);
image->colors++;
}
}
SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
(void) memset(colormap_index,0,extent*sizeof(*colormap_index));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].alpha=(double) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
IntensityCompare);
colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
if (colormap == (PixelInfo *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].alpha]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
GetPixelIndex(image,q))],q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
|
dist.h | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Dhiraj Kalamkar (Intel Corp.)
******************************************************************************/
#ifndef _DIST_H_
#define _DIST_H_
#ifdef USE_MPI
#include <mpi.h>
void dist_init(int *argc, char ***argv)
{
MPI_Init(argc, argv);
}
void dist_fini()
{
MPI_Finalize();
}
int dist_get_rank()
{
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
return rank;
}
int dist_get_size()
{
int size;
MPI_Comm_size(MPI_COMM_WORLD, &size);
return size;
}
void dist_barrier()
{
MPI_Barrier(MPI_COMM_WORLD);
}
void dist_alltoall(int count, float *sendbuf, float *recvbuf)
{
MPI_Alltoall(sendbuf, count, MPI_FLOAT, recvbuf, count, MPI_FLOAT, MPI_COMM_WORLD);
}
#elif defined(USE_CCL)
#include <ccl.hpp>
static ccl::communicator_t comm;
void dist_init(int *argc, char ***argv)
{
comm = ccl::environment::instance().create_communicator();
}
void dist_fini()
{
comm.reset();
}
int dist_get_rank()
{
return comm->rank();
}
int dist_get_size()
{
return comm->size();
}
void dist_barrier()
{
comm->barrier();
}
void dist_alltoall(int count, float *sendbuf, float *recvbuf)
{
comm->alltoall(sendbuf, recvbuf, (size_t)count, ccl::datatype::dt_float)->wait();
}
#else
void dist_init(int *argc, char ***argv)
{
return;
}
void dist_fini()
{
return;
}
int dist_get_rank()
{
return 0;
}
int dist_get_size()
{
return 1;
}
void dist_barrier()
{
return;
}
void dist_alltoall(int count, float *sendbuf, float *recvbuf)
{
#pragma omp parallel for
for(int i = 0; i < count; i++)
{
recvbuf[i] = sendbuf[i];
}
}
#endif
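/*
Usage sketch (illustrative): the same calling sequence works for all three
backends above (MPI, oneCCL, or the serial fallback):

int main(int argc, char **argv)
{
dist_init(&argc, &argv);
int rank = dist_get_rank();
int size = dist_get_size();
// ... exchange data with dist_alltoall(), synchronize with dist_barrier() ...
dist_barrier();
dist_fini();
return 0;
}
*/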
#endif /* _DIST_H_ */
|
mypaint-tiled-surface.c | /* libmypaint - The MyPaint Brush Library
* Copyright (C) 2007-2014 Martin Renold <martinxyz@gmx.ch> et al.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <config.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "mypaint-config.h"
#include "mypaint-tiled-surface.h"
#include "tiled-surface-private.h"
#include "helpers.h"
#include "brushmodes.h"
#include "operationqueue.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
void process_tile(MyPaintTiledSurface *self, int tx, int ty);
static void
begin_atomic_default(MyPaintSurface *surface)
{
mypaint_tiled_surface_begin_atomic((MyPaintTiledSurface *)surface);
}
static void
end_atomic_default(MyPaintSurface *surface, MyPaintRectangle *roi)
{
mypaint_tiled_surface_end_atomic((MyPaintTiledSurface *)surface, roi);
}
/**
* mypaint_tiled_surface_begin_atomic: (skip)
*
* Implementation of #MyPaintSurface::begin_atomic vfunc
* Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
* if implementing their own #MyPaintSurface::begin_atomic vfunc.
* Application code should only use mypaint_surface_begin_atomic().
*/
void
mypaint_tiled_surface_begin_atomic(MyPaintTiledSurface *self)
{
self->dirty_bbox.height = 0;
self->dirty_bbox.width = 0;
self->dirty_bbox.y = 0;
self->dirty_bbox.x = 0;
}
/**
* mypaint_tiled_surface_end_atomic: (skip)
*
* Implementation of #MyPaintSurface::end_atomic vfunc
* Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
* if implementing their own #MyPaintSurface::end_atomic vfunc.
* Application code should only use mypaint_surface_end_atomic().
*/
void
mypaint_tiled_surface_end_atomic(MyPaintTiledSurface *self, MyPaintRectangle *roi)
{
// Process tiles
TileIndex *tiles;
int tiles_n = operation_queue_get_dirty_tiles(self->operation_queue, &tiles);
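// Parallelize only when the tile backend is threadsafe and there are
// enough dirty tiles to amortize the cost of spinning up the thread team.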
#pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
for (int i = 0; i < tiles_n; i++) {
process_tile(self, tiles[i].x, tiles[i].y);
}
operation_queue_clear_dirty_tiles(self->operation_queue);
if (roi) {
*roi = self->dirty_bbox;
}
}
/**
* mypaint_tiled_surface_tile_request_start:
*
* Fetch a tile out from the underlying tile store.
* When successful, request->data will be set to point to the fetched tile.
* Consumers must *always* call mypaint_tiled_surface_tile_request_end() with the same
* request to complete the transaction.
*/
void mypaint_tiled_surface_tile_request_start(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
assert(self->tile_request_start);
self->tile_request_start(self, request);
}
/**
* mypaint_tiled_surface_tile_request_end:
*
* Put a (potentially modified) tile back into the underlying tile store.
*
* Consumers must *always* call mypaint_tiled_surface_tile_request_start() with the same
* request to start the transaction before calling this function.
*/
void mypaint_tiled_surface_tile_request_end(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
assert(self->tile_request_end);
self->tile_request_end(self, request);
}
/* FIXME: either expose this through MyPaintSurface, or move it into the brush engine */
/**
* mypaint_tiled_surface_set_symmetry_state:
* @active: TRUE to enable, FALSE to disable.
* @center_x: X axis to mirror events across.
* @center_y: Y axis to mirror events across.
* @symmetry_type: Symmetry type to activate.
* @rot_symmetry_lines: Number of rotational symmetry lines.
*
* Enable/Disable symmetric brush painting across an X axis.
*/
void
mypaint_tiled_surface_set_symmetry_state(MyPaintTiledSurface *self, gboolean active,
float center_x, float center_y,
MyPaintSymmetryType symmetry_type,
int rot_symmetry_lines)
{
self->surface_do_symmetry = active;
self->surface_center_x = center_x;
self->surface_center_y = center_y;
self->symmetry_type = symmetry_type;
self->rot_symmetry_lines = MAX(2, rot_symmetry_lines);
}
/**
* mypaint_tile_request_init:
*
* Initialize a request for use with mypaint_tiled_surface_tile_request_start()
* and mypaint_tiled_surface_tile_request_end()
*/
void
mypaint_tile_request_init(MyPaintTileRequest *data, int level,
int tx, int ty, gboolean readonly)
{
data->tx = tx;
data->ty = ty;
data->readonly = readonly;
data->buffer = NULL;
data->context = NULL;
#ifdef _OPENMP
data->thread_id = omp_get_thread_num();
#else
data->thread_id = -1;
#endif
data->mipmap_level = level;
}
// Must be threadsafe
static inline float
calculate_r_sample(float x, float y, float aspect_ratio,
float sn, float cs)
{
const float yyr=(y*cs-x*sn)*aspect_ratio;
const float xxr=y*sn+x*cs;
const float r = (yyr*yyr + xxr*xxr);
return r;
}
static inline float
calculate_rr(int xp, int yp, float x, float y, float aspect_ratio,
float sn, float cs, float one_over_radius2)
{
// code duplication, see brush::count_dabs_to()
const float yy = (yp + 0.5f - y);
const float xx = (xp + 0.5f - x);
const float yyr=(yy*cs-xx*sn)*aspect_ratio;
const float xxr=yy*sn+xx*cs;
const float rr = (yyr*yyr + xxr*xxr) * one_over_radius2;
// rr is in range 0.0..1.0*sqrt(2)
return rr;
}
static inline float
sign_point_in_line( float px, float py, float vx, float vy )
{
return (px - vx) * (-vy) - (vx) * (py - vy);
}
static inline void
closest_point_to_line( float lx, float ly, float px, float py, float *ox, float *oy )
{
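// Orthogonal projection of point P=(px,py) onto the line through the
// origin with direction L=(lx,ly): t = (P.L)/(L.L), closest point = t*L.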
const float l2 = lx*lx + ly*ly;
const float ltp_dot = px*lx + py*ly;
const float t = ltp_dot / l2;
*ox = lx * t;
*oy = ly * t;
}
// Must be threadsafe
//
// This works by taking the visibility at the nearest point
// and dividing by 1.0 + delta.
//
// - nearest point: point where the dab has more influence
// - farthest point: point at a fixed distance away from
// the nearest point
// - delta: how much the farthest point is occluded relative
// to the nearest point
static inline float
calculate_rr_antialiased(int xp, int yp, float x, float y, float aspect_ratio,
float sn, float cs, float one_over_radius2,
float r_aa_start)
{
// calculate pixel position and borders in a way
// that the dab's center is always at zero
float pixel_right = x - (float)xp;
float pixel_bottom = y - (float)yp;
float pixel_center_x = pixel_right - 0.5f;
float pixel_center_y = pixel_bottom - 0.5f;
float pixel_left = pixel_right - 1.0f;
float pixel_top = pixel_bottom - 1.0f;
float nearest_x, nearest_y; // nearest to origin, but still inside pixel
float farthest_x, farthest_y; // farthest from origin, but still inside pixel
float r_near, r_far, rr_near, rr_far;
// Dab's center is inside pixel?
if( pixel_left<0 && pixel_right>0 &&
pixel_top<0 && pixel_bottom>0 )
{
nearest_x = 0;
nearest_y = 0;
r_near = rr_near = 0;
}
else
{
closest_point_to_line( cs, sn, pixel_center_x, pixel_center_y, &nearest_x, &nearest_y );
nearest_x = CLAMP( nearest_x, pixel_left, pixel_right );
nearest_y = CLAMP( nearest_y, pixel_top, pixel_bottom );
// XXX: precision of "nearest" values could be improved
// by intersecting the line that goes from nearest_x/Y to 0
// with the pixel's borders here, however the improvements
// would probably not justify the performance cost.
r_near = calculate_r_sample( nearest_x, nearest_y, aspect_ratio, sn, cs );
rr_near = r_near * one_over_radius2;
}
// out of dab's reach?
if( rr_near > 1.0f )
return rr_near;
// check on which side of the dab's line is the pixel center
float center_sign = sign_point_in_line( pixel_center_x, pixel_center_y, cs, -sn );
// radius of a circle with area=1
// A = pi * r * r
// r = sqrt(1/pi)
const float rad_area_1 = sqrtf( 1.0f / M_PI );
// center is below dab
if( center_sign < 0 )
{
farthest_x = nearest_x - sn*rad_area_1;
farthest_y = nearest_y + cs*rad_area_1;
}
// above dab
else
{
farthest_x = nearest_x + sn*rad_area_1;
farthest_y = nearest_y - cs*rad_area_1;
}
r_far = calculate_r_sample( farthest_x, farthest_y, aspect_ratio, sn, cs );
rr_far = r_far * one_over_radius2;
// check if we can skip heavier AA
if( r_far < r_aa_start )
return (rr_far+rr_near) * 0.5f;
// calculate AA approximate
float visibilityNear = 1.0f - rr_near;
float delta = rr_far - rr_near;
float delta2 = 1.0f + delta;
visibilityNear /= delta2;
return 1.0f - visibilityNear;
}
static inline float
calculate_opa(float rr, float hardness,
float segment1_offset, float segment1_slope,
float segment2_offset, float segment2_slope) {
const float fac = rr <= hardness ? segment1_slope : segment2_slope;
float opa = rr <= hardness ? segment1_offset : segment2_offset;
opa += rr*fac;
if (rr > 1.0f) {
opa = 0.0f;
}
#ifdef HEAVY_DEBUG
assert(isfinite(opa));
assert(opa >= 0.0f && opa <= 1.0f);
#endif
return opa;
}
// Must be threadsafe
void render_dab_mask (uint16_t * mask,
float x, float y,
float radius,
float hardness,
float aspect_ratio, float angle
)
{
hardness = CLAMP(hardness, 0.0, 1.0);
if (aspect_ratio<1.0) aspect_ratio=1.0;
assert(hardness != 0.0); // assured by caller
// For a graphical explanation, see:
// http://wiki.mypaint.info/Development/Documentation/Brushlib
//
// The hardness calculation is explained below:
//
// Dab opacity gradually fades out from the center (rr=0) to
// fringe (rr=1) of the dab. How exactly depends on the hardness.
// We use two linear segments, for which we pre-calculate slope
// and offset here.
//
// opa
// ^
// * .
// | *
// | .
// +-----------*> rr = (distance_from_center/radius)^2
// 0 1
//
float segment1_offset = 1.0f;
float segment1_slope = -(1.0f/hardness - 1.0f);
float segment2_offset = hardness/(1.0f-hardness);
float segment2_slope = -hardness/(1.0f-hardness);
// for hardness == 1.0, segment2 will never be used
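// Worked example: hardness = 0.75 gives segment1 opa = 1 - rr/3 on
// rr in [0, 0.75] and segment2 opa = 3 - 3*rr on (0.75, 1]; the two
// segments meet at opa = 0.75 and segment2 reaches 0 exactly at rr = 1.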
float angle_rad=angle/360*2*M_PI;
float cs=cos(angle_rad);
float sn=sin(angle_rad);
const float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure
int x0 = floor (x - r_fringe);
int y0 = floor (y - r_fringe);
int x1 = floor (x + r_fringe);
int y1 = floor (y + r_fringe);
if (x0 < 0) x0 = 0;
if (y0 < 0) y0 = 0;
if (x1 > MYPAINT_TILE_SIZE-1) x1 = MYPAINT_TILE_SIZE-1;
if (y1 > MYPAINT_TILE_SIZE-1) y1 = MYPAINT_TILE_SIZE-1;
const float one_over_radius2 = 1.0f/(radius*radius);
// Pre-calculate rr and put it in the mask.
// This is an optimization that makes use of auto-vectorization
// OPTIMIZE: if using floats for the brush engine, store these directly in the mask
float rr_mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
if (radius < 3.0f)
{
const float aa_border = 1.0f;
float r_aa_start = ((radius>aa_border) ? (radius-aa_border) : 0);
r_aa_start *= r_aa_start / aspect_ratio;
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr_antialiased(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2,
r_aa_start);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
else
{
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
// we do run length encoding: if opacity is zero, the next
// value in the mask is the number of pixels that can be skipped.
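// Example stream for a row with two transparent pixels before opaque
// values a and b: { 0, 2*4, a, b, ... }. The skip count is stored
// pre-multiplied by 4 because consumers advance the rgba buffer, which
// holds 4 uint16 channels per pixel. A trailing { 0, 0 } pair ends the mask.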
uint16_t * mask_p = mask;
int skip=0;
skip += y0*MYPAINT_TILE_SIZE;
for (int yp = y0; yp <= y1; yp++) {
skip += x0;
int xp;
for (xp = x0; xp <= x1; xp++) {
const float rr = rr_mask[(yp*MYPAINT_TILE_SIZE)+xp];
const float opa = calculate_opa(rr, hardness,
segment1_offset, segment1_slope,
segment2_offset, segment2_slope);
const uint16_t opa_ = opa * (1<<15);
if (!opa_) {
skip++;
} else {
if (skip) {
*mask_p++ = 0;
*mask_p++ = skip*4;
skip = 0;
}
*mask_p++ = opa_;
}
}
skip += MYPAINT_TILE_SIZE-xp;
}
*mask_p++ = 0;
*mask_p++ = 0;
}
// Must be threadsafe
void
process_op(uint16_t *rgba_p, uint16_t *mask,
int tx, int ty, OperationDataDrawDab *op)
{
// first, we calculate the mask (opacity for each pixel)
render_dab_mask(mask,
op->x - tx*MYPAINT_TILE_SIZE,
op->y - ty*MYPAINT_TILE_SIZE,
op->radius,
op->hardness,
op->aspect_ratio, op->angle
);
// second, we use the mask to stamp a dab for each activated blend mode
if (op->paint < 1.0) {
if (op->normal) {
if (op->color_a == 1.0) {
draw_dab_pixels_BlendMode_Normal(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->normal*op->opaque*(1 - op->paint)*(1<<15));
} else {
// normal case for brushes that use smudging (eg. watercolor)
draw_dab_pixels_BlendMode_Normal_and_Eraser(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->color_a*(1<<15),
op->normal*op->opaque*(1 - op->paint)*(1<<15));
}
}
if (op->lock_alpha) {
draw_dab_pixels_BlendMode_LockAlpha(mask, rgba_p,
op->color_r, op->color_g, op->color_b,
op->lock_alpha*op->opaque*(1 - op->colorize)*(1 - op->posterize)*(1 - op->paint)*(1<<15));
}
}
if (op->paint > 0.0) {
if (op->normal) {
if (op->color_a == 1.0) {
draw_dab_pixels_BlendMode_Normal_Paint(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->normal*op->opaque*op->paint*(1<<15));
} else {
// normal case for brushes that use smudging (eg. watercolor)
draw_dab_pixels_BlendMode_Normal_and_Eraser_Paint(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->color_a*(1<<15),
op->normal*op->opaque*op->paint*(1<<15));
}
}
if (op->lock_alpha) {
draw_dab_pixels_BlendMode_LockAlpha_Paint(mask, rgba_p,
op->color_r, op->color_g, op->color_b,
op->lock_alpha*op->opaque*(1 - op->colorize)*(1 - op->posterize)*op->paint*(1<<15));
}
}
if (op->colorize) {
draw_dab_pixels_BlendMode_Color(mask, rgba_p,
op->color_r, op->color_g, op->color_b,
op->colorize*op->opaque*(1<<15));
}
if (op->posterize) {
draw_dab_pixels_BlendMode_Posterize(mask, rgba_p,
op->posterize*op->opaque*(1<<15),
op->posterize_num);
}
}
// Must be threadsafe
void
process_tile(MyPaintTiledSurface *self, int tx, int ty)
{
TileIndex tile_index = {tx, ty};
OperationDataDrawDab *op = operation_queue_pop(self->operation_queue, tile_index);
if (!op) {
return;
}
MyPaintTileRequest request_data;
const int mipmap_level = 0;
mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, FALSE);
mypaint_tiled_surface_tile_request_start(self, &request_data);
uint16_t * rgba_p = request_data.buffer;
if (!rgba_p) {
printf("Warning: Unable to get tile!\n");
return;
}
uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
while (op) {
process_op(rgba_p, mask, tile_index.x, tile_index.y, op);
free(op);
op = operation_queue_pop(self->operation_queue, tile_index);
}
mypaint_tiled_surface_tile_request_end(self, &request_data);
}
// OPTIMIZE: send a list of the exact changed rects instead of a bounding box
// to minimize the area being composited? Profile to see the effect first.
void
update_dirty_bbox(MyPaintTiledSurface *self, OperationDataDrawDab *op)
{
int bb_x, bb_y, bb_w, bb_h;
float r_fringe = op->radius + 1.0f; // +1.0 should not be required, only to be sure
bb_x = floor (op->x - r_fringe);
bb_y = floor (op->y - r_fringe);
bb_w = floor (op->x + r_fringe) - bb_x + 1;
bb_h = floor (op->y + r_fringe) - bb_y + 1;
mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, bb_x, bb_y);
mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, bb_x+bb_w-1, bb_y+bb_h-1);
}
// returns TRUE if the surface was modified
gboolean draw_dab_internal (MyPaintTiledSurface *self, float x, float y,
float radius,
float color_r, float color_g, float color_b,
float opaque, float hardness,
float color_a,
float aspect_ratio, float angle,
float lock_alpha,
float colorize,
float posterize,
float posterize_num,
float paint
)
{
OperationDataDrawDab op_struct;
OperationDataDrawDab *op = &op_struct;
op->x = x;
op->y = y;
op->radius = radius;
op->aspect_ratio = aspect_ratio;
op->angle = angle;
op->opaque = CLAMP(opaque, 0.0f, 1.0f);
op->hardness = CLAMP(hardness, 0.0f, 1.0f);
op->lock_alpha = CLAMP(lock_alpha, 0.0f, 1.0f);
op->colorize = CLAMP(colorize, 0.0f, 1.0f);
op->posterize = CLAMP(posterize, 0.0f, 1.0f);
op->posterize_num= CLAMP(ROUND(posterize_num * 100.0), 1, 128);
op->paint = CLAMP(paint, 0.0f, 1.0f);
if (op->radius < 0.1f) return FALSE; // don't bother with dabs smaller than 0.1 pixel
if (op->hardness == 0.0f) return FALSE; // infinitely small center point, fully transparent outside
if (op->opaque == 0.0f) return FALSE;
color_r = CLAMP(color_r, 0.0f, 1.0f);
color_g = CLAMP(color_g, 0.0f, 1.0f);
color_b = CLAMP(color_b, 0.0f, 1.0f);
color_a = CLAMP(color_a, 0.0f, 1.0f);
op->color_r = color_r * (1<<15);
op->color_g = color_g * (1<<15);
op->color_b = color_b * (1<<15);
op->color_a = color_a;
// blending mode preparation
op->normal = 1.0f;
op->normal *= 1.0f-op->lock_alpha;
op->normal *= 1.0f-op->colorize;
op->normal *= 1.0f-op->posterize;
if (op->aspect_ratio<1.0f) op->aspect_ratio=1.0f;
// Determine the tiles influenced by operation, and queue it for processing for each tile
float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure
int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);
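// floor() of the division (rather than integer division) keeps rounding
// toward -infinity, so tile indices stay correct for dabs that extend
// left of or above the origin (negative coordinates).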
for (int ty = ty1; ty <= ty2; ty++) {
for (int tx = tx1; tx <= tx2; tx++) {
const TileIndex tile_index = {tx, ty};
OperationDataDrawDab *op_copy = (OperationDataDrawDab *)malloc(sizeof(OperationDataDrawDab));
*op_copy = *op;
operation_queue_add(self->operation_queue, tile_index, op_copy);
}
}
update_dirty_bbox(self, op);
return TRUE;
}
// returns TRUE if the surface was modified
int draw_dab (MyPaintSurface *surface, float x, float y,
float radius,
float color_r, float color_g, float color_b,
float opaque, float hardness,
float color_a,
float aspect_ratio, float angle,
float lock_alpha,
float colorize,
float posterize,
float posterize_num,
float paint)
{
MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;
gboolean surface_modified = FALSE;
// Normal pass
if (draw_dab_internal(self, x, y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, angle,
lock_alpha, colorize, posterize, posterize_num, paint)) {
surface_modified = TRUE;
}
// Symmetry pass
if(self->surface_do_symmetry) {
const float dist_x = (self->surface_center_x - x);
const float dist_y = (self->surface_center_y - y);
const float symm_x = self->surface_center_x + dist_x;
const float symm_y = self->surface_center_y + dist_y;
const float dab_dist = sqrt(dist_x * dist_x + dist_y * dist_y);
const float rot_width = 360.0 / ((float) self->rot_symmetry_lines);
const float dab_angle_offset = atan2(-dist_y, -dist_x) / (2 * M_PI) * 360.0;
int dab_count = 1;
int sub_dab_count = 0;
switch(self->symmetry_type) {
case MYPAINT_SYMMETRY_TYPE_VERTICAL:
if (draw_dab_internal(self, symm_x, y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, -angle,
lock_alpha, colorize, posterize, posterize_num, paint)) {
surface_modified = TRUE;
}
break;
case MYPAINT_SYMMETRY_TYPE_HORIZONTAL:
if (draw_dab_internal(self, x, symm_y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, angle + 180.0,
lock_alpha, colorize, posterize, posterize_num, paint)) {
surface_modified = TRUE;
}
break;
case MYPAINT_SYMMETRY_TYPE_VERTHORZ:
// reflect vertically
if (draw_dab_internal(self, symm_x, y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, -angle,
lock_alpha, colorize, posterize, posterize_num, paint)) {
dab_count++;
}
// reflect horizontally
if (draw_dab_internal(self, x, symm_y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, angle + 180.0,
lock_alpha, colorize, posterize, posterize_num, paint)) {
dab_count++;
}
// reflect horizontally and vertically
if (draw_dab_internal(self, symm_x, symm_y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, -angle - 180.0,
lock_alpha, colorize, posterize, posterize_num, paint)) {
dab_count++;
}
if (dab_count == 4) {
surface_modified = TRUE;
}
break;
case MYPAINT_SYMMETRY_TYPE_SNOWFLAKE: {
gboolean failed_subdabs = FALSE;
// draw all self->rot_symmetry_lines snowflake dabs,
// starting from index 0, because the mirrored copy of
// the initial dab is not covered by the initial pass
for (sub_dab_count = 0; sub_dab_count < self->rot_symmetry_lines; sub_dab_count++) {
// calculate the offset from rotational symmetry
const float symmetry_angle_offset = ((float)sub_dab_count) * rot_width;
// subtract the angle offset since we're progressing clockwise
const float cur_angle = symmetry_angle_offset - dab_angle_offset;
// progress through the rotation angle offsets clockwise
// to reflect the dab relative to itself
const float rot_x = self->surface_center_x - dab_dist*cos(cur_angle / 180.0 * M_PI);
const float rot_y = self->surface_center_y - dab_dist*sin(cur_angle / 180.0 * M_PI);
if (!draw_dab_internal(self, rot_x, rot_y, radius, color_r, color_g, color_b,
opaque, hardness, color_a,
aspect_ratio, -angle + symmetry_angle_offset,
lock_alpha, colorize, posterize, posterize_num, paint)) {
failed_subdabs = TRUE;
break;
}
}
// do not bother falling through to rotational if the snowflaked dabs failed
if (failed_subdabs) {
break;
}
// if it succeeded, fallthrough to rotational to finish the process
}
case MYPAINT_SYMMETRY_TYPE_ROTATIONAL: {
// draw self->rot_symmetry_lines rotational dabs
// since initial pass handles the first dab
for (dab_count = 1; dab_count < self->rot_symmetry_lines; dab_count++)
{
// calculate the offset from rotational symmetry
const float symmetry_angle_offset = ((float)dab_count) * rot_width;
// add the angle initial dab is from center point
const float cur_angle = symmetry_angle_offset + dab_angle_offset;
// progress through the rotation angle offsets counterclockwise
const float rot_x = self->surface_center_x + dab_dist*cos(cur_angle / 180.0 * M_PI);
const float rot_y = self->surface_center_y + dab_dist*sin(cur_angle / 180.0 * M_PI);
if (!draw_dab_internal(self, rot_x, rot_y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio,
angle + symmetry_angle_offset,
lock_alpha, colorize, posterize, posterize_num, paint)) {
break;
}
}
if (dab_count == self->rot_symmetry_lines) {
surface_modified = TRUE;
}
break;
}
}
}
return surface_modified;
}
void get_color (MyPaintSurface *surface, float x, float y,
float radius,
float * color_r, float * color_g, float * color_b, float * color_a,
float paint
)
{
MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;
if (radius < 1.0f) radius = 1.0f;
const float hardness = 0.5f;
const float aspect_ratio = 1.0f;
const float angle = 0.0f;
float sum_weight, sum_r, sum_g, sum_b, sum_a;
sum_weight = sum_r = sum_g = sum_b = sum_a = 0.0f;
// in case we return with an error
*color_r = 0.0f;
*color_g = 1.0f;
*color_b = 0.0f;
// WARNING: some code duplication with draw_dab
float r_fringe = radius + 1.0f; // +1 should not be required, only to be sure
int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);
#ifdef _OPENMP
int tiles_n = (tx2 - tx1) * (ty2 - ty1);
#endif
#pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
for (int ty = ty1; ty <= ty2; ty++) {
for (int tx = tx1; tx <= tx2; tx++) {
// Flush queued draw_dab operations
process_tile(self, tx, ty);
MyPaintTileRequest request_data;
const int mipmap_level = 0;
mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, TRUE);
mypaint_tiled_surface_tile_request_start(self, &request_data);
uint16_t * rgba_p = request_data.buffer;
if (!rgba_p) {
printf("Warning: Unable to get tile!\n");
break;
}
// first, we calculate the mask (opacity for each pixel)
uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
render_dab_mask(mask,
x - tx*MYPAINT_TILE_SIZE,
y - ty*MYPAINT_TILE_SIZE,
radius,
hardness,
aspect_ratio, angle
);
// TODO: try atomic operations instead
#pragma omp critical
{
get_color_pixels_accumulate (mask, rgba_p,
&sum_weight, &sum_r, &sum_g, &sum_b, &sum_a, paint);
}
mypaint_tiled_surface_tile_request_end(self, &request_data);
}
}
assert(sum_weight > 0.0f);
sum_a /= sum_weight;
*color_a = sum_a;
// now un-premultiply the alpha
if (sum_a > 0.0f) {
*color_r = sum_r;
*color_g = sum_g;
*color_b = sum_b;
} else {
// it is all transparent, so don't care about the colors
// (let's make them ugly so bugs will be visible)
*color_r = 0.0f;
*color_g = 1.0f;
*color_b = 0.0f;
}
// fix rounding problems that do happen due to floating point math
*color_r = CLAMP(*color_r, 0.0f, 1.0f);
*color_g = CLAMP(*color_g, 0.0f, 1.0f);
*color_b = CLAMP(*color_b, 0.0f, 1.0f);
*color_a = CLAMP(*color_a, 0.0f, 1.0f);
}
/**
* mypaint_tiled_surface_init: (skip)
*
* Initialize the surface, passing in implementations of the tile backend.
* Note: Only intended to be called from subclasses of #MyPaintTiledSurface
**/
void
mypaint_tiled_surface_init(MyPaintTiledSurface *self,
MyPaintTileRequestStartFunction tile_request_start,
MyPaintTileRequestEndFunction tile_request_end)
{
mypaint_surface_init(&self->parent);
self->parent.draw_dab = draw_dab;
self->parent.get_color = get_color;
self->parent.begin_atomic = begin_atomic_default;
self->parent.end_atomic = end_atomic_default;
self->tile_request_end = tile_request_end;
self->tile_request_start = tile_request_start;
self->tile_size = MYPAINT_TILE_SIZE;
self->threadsafe_tile_requests = FALSE;
self->dirty_bbox.x = 0;
self->dirty_bbox.y = 0;
self->dirty_bbox.width = 0;
self->dirty_bbox.height = 0;
self->surface_do_symmetry = FALSE;
self->symmetry_type = MYPAINT_SYMMETRY_TYPE_VERTICAL;
self->surface_center_x = 0.0f;
self->surface_center_y = 0.0f;
self->rot_symmetry_lines = 2;
self->operation_queue = operation_queue_new();
}
/**
* mypaint_tiled_surface_destroy: (skip)
*
* Deallocate resources set up by mypaint_tiled_surface_init()
* Does not free the #MyPaintTiledSurface itself.
* Note: Only intended to be called from subclasses of #MyPaintTiledSurface
*/
void
mypaint_tiled_surface_destroy(MyPaintTiledSurface *self)
{
operation_queue_free(self->operation_queue);
}
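/* Typical paint cycle from application code (illustrative sketch; the full
draw_dab argument list is elided):

mypaint_surface_begin_atomic(surface);
mypaint_surface_draw_dab(surface, x, y, radius, ...); // one or more dabs
MyPaintRectangle roi;
mypaint_surface_end_atomic(surface, &roi); // roi receives the dirty bbox
*/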
|
ZQ_CNN_MTCNN_Interface.h | #ifndef _ZQ_CNN_MTCNN_INTERFACE_H_
#define _ZQ_CNN_MTCNN_INTERFACE_H_
#pragma once
#include "ZQ_CNN_Net_Interface.h"
#include "ZQ_CNN_Tensor4D_Interface.h"
#include "ZQ_CNN_BBoxUtils.h"
#include <omp.h>
namespace ZQ
{
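/* MTCNN cascade wrapper: PNet scans an image pyramid and proposes candidate
windows, RNet filters and refines them, ONet produces the final boxes plus
5-point landmarks, and the optional LNet refines landmarks further (the
106-point variant is exposed via Find106). Each stage applies NMS to its
output before handing boxes to the next one. */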
template<class ZQ_CNN_Net_Interface, class ZQ_CNN_Tensor4D_Interface, class ZQ_CNN_Tensor4D_Interface_Base>
class ZQ_CNN_MTCNN_Interface
{
public:
using string = std::string;
ZQ_CNN_MTCNN_Interface()
{
min_size = 60;
thresh[0] = 0.6;
thresh[1] = 0.7;
thresh[2] = 0.7;
nms_thresh[0] = 0.6;
nms_thresh[1] = 0.7;
nms_thresh[2] = 0.7;
width = 0;
height = 0;
factor = 0.709;
pnet_overlap_thresh_count = 4;
pnet_size = 12;
pnet_stride = 2;
special_handle_very_big_face = false;
force_run_pnet_multithread = false;
show_debug_info = false;
limit_r_num = 0;
limit_o_num = 0;
limit_l_num = 0;
}
~ZQ_CNN_MTCNN_Interface()
{
}
private:
#if __ARM_NEON
const int BATCH_SIZE = 16;
#else
const int BATCH_SIZE = 64;
#endif
std::vector<ZQ_CNN_Net_Interface> pnet, rnet, onet, lnet;
bool has_lnet;
int thread_num;
float thresh[3], nms_thresh[3];
int min_size;
int width, height;
float factor;
int pnet_overlap_thresh_count;
int pnet_size;
int pnet_stride;
int rnet_size;
int onet_size;
int lnet_size;
bool special_handle_very_big_face;
bool do_landmark;
float early_accept_thresh;
float nms_thresh_per_scale;
bool force_run_pnet_multithread;
std::vector<float> scales;
std::vector<ZQ_CNN_Tensor4D_Interface> pnet_images;
ZQ_CNN_Tensor4D_Interface ori_input, rnet_image, onet_image;
bool show_debug_info;
int limit_r_num;
int limit_o_num;
int limit_l_num;
public:
void TurnOnShowDebugInfo() { show_debug_info = true; }
void TurnOffShowDebugInfo() { show_debug_info = false; }
void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0)
{
limit_r_num = limit_r;
limit_o_num = limit_o;
limit_l_num = limit_l;
}
bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model,
const string& onet_param, const string& onet_model, int thread_num = 1,
bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "")
{
if (thread_num < 1)
force_run_pnet_multithread = true;
else
force_run_pnet_multithread = false;
thread_num = __max(1, thread_num);
pnet.resize(thread_num);
rnet.resize(thread_num);
onet.resize(thread_num);
this->has_lnet = has_lnet;
if (has_lnet)
{
lnet.resize(thread_num);
}
bool ret = true;
for (int i = 0; i < thread_num; i++)
{
ret = pnet[i].LoadFrom(pnet_param, pnet_model, true, 1e-9, true)
&& rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true)
&& onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true);
if (has_lnet && ret)
ret = lnet[i].LoadFrom(lnet_param, lnet_model, true, 1e-9, true);
if (!ret)
break;
}
if (!ret)
{
pnet.clear();
rnet.clear();
onet.clear();
if (has_lnet)
lnet.clear();
this->thread_num = 0;
}
else
this->thread_num = thread_num;
if (show_debug_info)
{
printf("rnet = %.2f M, onet = %.2f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0),
onet[0].GetNumOfMulAdd() / (1024.0*1024.0));
if (has_lnet)
printf("lnet = %.2f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0));
}
int C, H, W;
rnet[0].GetInputDim(C, H, W);
rnet_size = H;
onet[0].GetInputDim(C, H, W);
onet_size = H;
if (has_lnet)
{
lnet[0].GetInputDim(C, H, W);
lnet_size = H;
}
return ret;
}
void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7,
float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709,
int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false,
bool do_landmark = true, float early_accept_thresh = 1.00)
{
min_size = __max(pnet_size, min_face_size);
thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh);
nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh);
scale_factor = __max(0.5, __min(0.97, scale_factor));
this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count);
this->pnet_size = pnet_size;
this->pnet_stride = pnet_stride;
this->special_handle_very_big_face = special_handle_very_big_face;
this->do_landmark = do_landmark;
this->early_accept_thresh = early_accept_thresh;
if (pnet_size == 20 && pnet_stride == 4)
nms_thresh_per_scale = 0.45;
else
nms_thresh_per_scale = 0.495;
if (width != w || height != h || factor != scale_factor)
{
scales.clear();
pnet_images.clear();
width = w; height = h;
float minside = __min(width, height);
int MIN_DET_SIZE = pnet_size;
float m = (float)MIN_DET_SIZE / min_size;
minside *= m;
while (minside > MIN_DET_SIZE)
{
scales.push_back(m);
minside *= factor;
m *= factor;
}
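// Worked example (illustrative): w = h = 640, min_face_size = 60,
// pnet_size = 12, factor = 0.709 -> m = 12/60 = 0.2, so the scales start
// at {0.2, 0.1418, 0.1005, ...}, shrinking until the scaled short side
// would drop to MIN_DET_SIZE.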
minside = __min(width, height);
int count = scales.size();
for (int i = scales.size() - 1; i >= 0; i--)
{
if (ceil(scales[i] * minside) <= pnet_size)
{
count--;
}
}
if (special_handle_very_big_face)
{
if (count > 2)
count--;
scales.resize(count);
if (count > 0)
{
float last_size = ceil(scales[count - 1] * minside);
for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2)
{
scales.push_back((float)tmp_size / minside);
count++;
}
}
scales.push_back((float)pnet_size / minside);
count++;
}
else
{
scales.push_back((float)pnet_size / minside);
count++;
}
pnet_images.resize(count);
}
}
bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results)
{
double t1 = omp_get_wtime();
if (width != _width || height != _height)
return false;
if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep))
return false;
double t2 = omp_get_wtime();
if (show_debug_info)
printf("convert cost: %.3f ms\n", 1000 * (t2 - t1));
return Find(ori_input, results);
}
bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results)
{
double t1 = omp_get_wtime();
if (width != _width || height != _height)
return false;
if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep))
return false;
double t2 = omp_get_wtime();
if (show_debug_info)
printf("convert cost: %.3f ms\n", 1000 * (t2 - t1));
return Find106(ori_input, results);
}
bool Find(ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& results)
{
double t1 = omp_get_wtime();
std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
if (!_Pnet_stage(input, firstBbox))
return false;
//results = firstBbox;
//return true;
if (limit_r_num > 0)
{
_select(firstBbox, limit_r_num, input.GetW(), input.GetH());
}
double t2 = omp_get_wtime();
if (!_Rnet_stage(input, firstBbox, secondBbox))
return false;
//results = secondBbox;
//return true;
if (limit_o_num > 0)
{
_select(secondBbox, limit_o_num, input.GetW(), input.GetH());
}
if (!has_lnet || !do_landmark)
{
double t3 = omp_get_wtime();
if (!_Onet_stage(input, secondBbox, results))
return false;
double t4 = omp_get_wtime();
if (show_debug_info)
{
printf("final found num: %d\n", (int)results.size());
printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n",
1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3));
}
}
else
{
double t3 = omp_get_wtime();
if (!_Onet_stage(input, secondBbox, thirdBbox))
return false;
if (limit_l_num > 0)
{
_select(thirdBbox, limit_l_num, input.GetW(), input.GetH());
}
double t4 = omp_get_wtime();
if (!_Lnet_stage(input, thirdBbox, results))
return false;
double t5 = omp_get_wtime();
if (show_debug_info)
{
printf("final found num: %d\n", (int)results.size());
printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
}
}
return true;
}
bool Find106(ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox106>& results)
{
double t1 = omp_get_wtime();
std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
if (!_Pnet_stage(input, firstBbox))
return false;
//results = firstBbox;
//return true;
if (limit_r_num > 0)
{
_select(firstBbox, limit_r_num, input.GetW(), input.GetH());
}
double t2 = omp_get_wtime();
if (!_Rnet_stage(input, firstBbox, secondBbox))
return false;
//results = secondBbox;
//return true;
if (limit_o_num > 0)
{
_select(secondBbox, limit_o_num, input.GetW(), input.GetH());
}
if (!has_lnet || !do_landmark)
{
return false;
}
double t3 = omp_get_wtime();
if (!_Onet_stage(input, secondBbox, thirdBbox))
return false;
if (limit_l_num > 0)
{
_select(thirdBbox, limit_l_num, input.GetW(), input.GetH());
}
double t4 = omp_get_wtime();
if (!_Lnet106_stage(input, thirdBbox, results))
return false;
double t5 = omp_get_wtime();
if (show_debug_info)
{
printf("final found num: %d\n", (int)results.size());
printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
}
return true;
}
private:
void _compute_Pnet_single_thread(ZQ_CNN_Tensor4D_Interface& input, std::vector<std::vector<float> >& maps,
std::vector<int>& mapH, std::vector<int>& mapW)
{
int scale_num = 0;
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
scale_num++;
mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
}
maps.resize(scale_num);
for (int i = 0; i < scale_num; i++)
{
maps[i].resize(mapH[i] * mapW[i]);
}
for (int i = 0; i < scale_num; i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
float cur_scale_x = (float)width / changedW;
float cur_scale_y = (float)height / changedH;
double t10 = omp_get_wtime();
if (scales[i] != 1)
{
input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
}
double t11 = omp_get_wtime();
if (scales[i] != 1)
pnet[0].Forward(pnet_images[i]);
else
pnet[0].Forward(input);
double t12 = omp_get_wtime();
if (show_debug_info)
printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n",
i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11));
const ZQ_CNN_Tensor4D_Interface_Base* score = pnet[0].GetBlobByName("prob1");
//score p
int scoreH = score->GetH();
int scoreW = score->GetW();
int scorePixStep = score->GetPixelStep();
const float *p = score->GetFirstPixelPtr() + 1;
for (int row = 0; row < scoreH; row++)
{
for (int col = 0; col < scoreW; col++)
{
if (row < mapH[i] && col < mapW[i])
maps[i][row*mapW[i] + col] = *p;
p += scorePixStep;
}
}
}
}
void _compute_Pnet_multi_thread(ZQ_CNN_Tensor4D_Interface& input, std::vector<std::vector<float> >& maps,
std::vector<int>& mapH, std::vector<int>& mapW)
{
if (thread_num <= 1)
{
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
if (scales[i] != 1)
{
input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1)
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
if (scales[i] != 1)
{
input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
}
}
}
int scale_num = 0;
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
scale_num++;
mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
}
maps.resize(scale_num);
for (int i = 0; i < scale_num; i++)
{
maps[i].resize(mapH[i] * mapW[i]);
}
std::vector<int> task_rect_off_x;
std::vector<int> task_rect_off_y;
std::vector<int> task_rect_width;
std::vector<int> task_rect_height;
std::vector<float> task_scale;
std::vector<int> task_scale_id;
int stride = pnet_stride;
const int block_size = 64 * stride;
int cellsize = pnet_size;
int border_size = cellsize - stride;
int overlap_border_size = cellsize / stride;
int jump_size = block_size - border_size;
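// Blocks overlap by border_size pixels so that every pnet_size window
// lies entirely inside at least one block; jump_size is the stride
// between consecutive block origins.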
for (int i = 0; i < scales.size(); i++)
{
int changeH = (int)ceil(height*scales[i]);
int changeW = (int)ceil(width*scales[i]);
if (changeH < pnet_size || changeW < pnet_size)
continue;
int block_H_num = 0;
int block_W_num = 0;
int start = 0;
while (start < changeH)
{
block_H_num++;
if (start + block_size >= changeH)
break;
start += jump_size;
}
start = 0;
while (start < changeW)
{
block_W_num++;
if (start + block_size >= changeW)
break;
start += jump_size;
}
for (int s = 0; s < block_H_num; s++)
{
for (int t = 0; t < block_W_num; t++)
{
int rect_off_x = t * jump_size;
int rect_off_y = s * jump_size;
int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x;
int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y;
if (rect_width >= cellsize && rect_height >= cellsize)
{
task_rect_off_x.push_back(rect_off_x);
task_rect_off_y.push_back(rect_off_y);
task_rect_width.push_back(rect_width);
task_rect_height.push_back(rect_height);
task_scale.push_back(scales[i]);
task_scale_id.push_back(i);
}
}
}
}
//
int task_num = task_scale.size();
std::vector<ZQ_CNN_Tensor4D_Interface> task_pnet_images(thread_num);
if (thread_num <= 1)
{
for (int i = 0; i < task_num; i++)
{
int thread_id = omp_get_thread_num();
int scale_id = task_scale_id[i];
float cur_scale = task_scale[i];
int i_rect_off_x = task_rect_off_x[i];
int i_rect_off_y = task_rect_off_y[i];
int i_rect_width = task_rect_width[i];
int i_rect_height = task_rect_height[i];
if (scale_id == 0 && scales[0] == 1)
{
if (!input.ROI(task_pnet_images[thread_id],
i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
continue;
}
else
{
if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id],
i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
continue;
}
if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
continue;
const ZQ_CNN_Tensor4D_Interface_Base* score = pnet[thread_id].GetBlobByName("prob1");
int task_count = 0;
//score p
int scoreH = score->GetH();
int scoreW = score->GetW();
int scorePixStep = score->GetPixelStep();
const float *p = score->GetFirstPixelPtr() + 1;
ZQ_CNN_BBox bbox;
ZQ_CNN_OrderScore order;
for (int row = 0; row < scoreH; row++)
{
for (int col = 0; col < scoreW; col++)
{
int real_row = row + i_rect_off_y / stride;
int real_col = col + i_rect_off_x / stride;
if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
p += scorePixStep;
}
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num)
for (int i = 0; i < task_num; i++)
{
int thread_id = omp_get_thread_num();
int scale_id = task_scale_id[i];
float cur_scale = task_scale[i];
int i_rect_off_x = task_rect_off_x[i];
int i_rect_off_y = task_rect_off_y[i];
int i_rect_width = task_rect_width[i];
int i_rect_height = task_rect_height[i];
if (scale_id == 0 && scales[0] == 1)
{
if (!input.ROI(task_pnet_images[thread_id],
i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
continue;
}
else
{
if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id],
i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
continue;
}
if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
continue;
const ZQ_CNN_Tensor4D_Interface_Base* score = pnet[thread_id].GetBlobByName("prob1");
int task_count = 0;
//score p
int scoreH = score->GetH();
int scoreW = score->GetW();
int scorePixStep = score->GetPixelStep();
const float *p = score->GetFirstPixelPtr() + 1;
ZQ_CNN_BBox bbox;
ZQ_CNN_OrderScore order;
for (int row = 0; row < scoreH; row++)
{
for (int col = 0; col < scoreW; col++)
{
int real_row = row + i_rect_off_y / stride;
int real_col = col + i_rect_off_x / stride;
if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
p += scorePixStep;
}
}
}
}
}
bool _Pnet_stage(ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& firstBbox)
{
if (thread_num <= 0)
return false;
double t1 = omp_get_wtime();
firstBbox.clear();
std::vector<std::vector<float> > maps;
std::vector<int> mapH;
std::vector<int> mapW;
if (thread_num == 1 && !force_run_pnet_multithread)
{
pnet[0].TurnOffShowDebugInfo();
//pnet[0].TurnOnShowDebugInfo();
_compute_Pnet_single_thread(input, maps, mapH, mapW);
}
else
{
_compute_Pnet_multi_thread(input, maps, mapH, mapW);
}
ZQ_CNN_OrderScore order;
std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size());
std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size());
const int block_size = 32;
int stride = pnet_stride;
int cellsize = pnet_size;
int border_size = cellsize / stride;
for (int i = 0; i < maps.size(); i++)
{
double t13 = omp_get_wtime();
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
if (changedH < pnet_size || changedW < pnet_size)
continue;
float cur_scale_x = (float)width / changedW;
float cur_scale_y = (float)height / changedH;
int count = 0;
//score p
int scoreH = mapH[i];
int scoreW = mapW[i];
const float *p = &maps[i][0];
if (scoreW <= block_size && scoreH <= block_size)
{
ZQ_CNN_BBox bbox;
ZQ_CNN_OrderScore order;
for (int row = 0; row < scoreH; row++)
{
for (int col = 0; col < scoreW; col++)
{
if (*p > thresh[0])
{
bbox.score = *p;
order.score = *p;
order.oriOrder = count;
bbox.row1 = stride*row;
bbox.col1 = stride*col;
bbox.row2 = stride*row + cellsize;
bbox.col2 = stride*col + cellsize;
bbox.exist = true;
bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
&& (col >= border_size && col < scoreW - border_size);
bounding_boxes[i].push_back(bbox);
bounding_scores[i].push_back(order);
count++;
}
p++;
}
}
int before_count = bounding_boxes[i].size();
ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
int after_count = bounding_boxes[i].size();
for (int j = 0; j < after_count; j++)
{
ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
bbox.row1 = round(bbox.row1 *cur_scale_y);
bbox.col1 = round(bbox.col1 *cur_scale_x);
bbox.row2 = round(bbox.row2 *cur_scale_y);
bbox.col2 = round(bbox.col2 *cur_scale_x);
bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
}
double t14 = omp_get_wtime();
if (show_debug_info)
printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
}
else
{
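// Large score map: split it into overlapping blocks, run NMS per block
// (in parallel when thread_num > 1), then merge the survivors; a single
// NMS pass over the whole map would be far more expensive, since NMS
// cost grows quadratically with the candidate count.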
int before_count = 0, after_count = 0;
int block_H_num = __max(1, scoreH / block_size);
int block_W_num = __max(1, scoreW / block_size);
int block_num = block_H_num*block_W_num;
int width_per_block = scoreW / block_W_num;
int height_per_block = scoreH / block_H_num;
std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num);
std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num);
std::vector<int> block_start_w(block_num), block_end_w(block_num);
std::vector<int> block_start_h(block_num), block_end_h(block_num);
for (int bh = 0; bh < block_H_num; bh++)
{
for (int bw = 0; bw < block_W_num; bw++)
{
int bb = bh * block_W_num + bw;
block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size);
block_end_w[bb] = (bw == block_W_num - 1) ? scoreW : ((bw + 1)*width_per_block);
block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size);
block_end_h[bb] = (bh == block_H_num - 1) ? scoreH : ((bh + 1)*height_per_block);
}
}
int chunk_size = 1;// ceil((float)block_num / thread_num);
if (thread_num <= 1)
{
for (int bb = 0; bb < block_num; bb++)
{
ZQ_CNN_BBox bbox;
ZQ_CNN_OrderScore order;
int count = 0;
for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
{
p = &maps[i][0] + row*scoreW + block_start_w[bb];
for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
{
if (*p > thresh[0])
{
bbox.score = *p;
order.score = *p;
order.oriOrder = count;
bbox.row1 = stride*row;
bbox.col1 = stride*col;
bbox.row2 = stride*row + cellsize;
bbox.col2 = stride*col + cellsize;
bbox.exist = true;
bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
&& (col >= border_size && col < scoreW - border_size);
bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
tmp_bounding_boxes[bb].push_back(bbox);
tmp_bounding_scores[bb].push_back(order);
count++;
}
p++;
}
}
int tmp_before_count = tmp_bounding_boxes[bb].size();
ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
int tmp_after_count = tmp_bounding_boxes[bb].size();
before_count += tmp_before_count;
after_count += tmp_after_count;
}
}
else
{
#pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num)
for (int bb = 0; bb < block_num; bb++)
{
ZQ_CNN_BBox bbox;
ZQ_CNN_OrderScore order;
int count = 0;
for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
{
const float* p = &maps[i][0] + row*scoreW + block_start_w[bb];
for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
{
if (*p > thresh[0])
{
bbox.score = *p;
order.score = *p;
order.oriOrder = count;
bbox.row1 = stride*row;
bbox.col1 = stride*col;
bbox.row2 = stride*row + cellsize;
bbox.col2 = stride*col + cellsize;
bbox.exist = true;
bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
&& (col >= border_size && col < scoreW - border_size);
bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
tmp_bounding_boxes[bb].push_back(bbox);
tmp_bounding_scores[bb].push_back(order);
count++;
}
p++;
}
}
int tmp_before_count = tmp_bounding_boxes[bb].size();
ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
int tmp_after_count = tmp_bounding_boxes[bb].size();
before_count += tmp_before_count;
after_count += tmp_after_count;
}
}
count = 0;
for (int bb = 0; bb < block_num; bb++)
{
std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin();
for (; it != tmp_bounding_boxes[bb].end(); it++)
{
if ((*it).exist)
{
bounding_boxes[i].push_back(*it);
order.score = (*it).score;
order.oriOrder = count;
bounding_scores[i].push_back(order);
count++;
}
}
}
//ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0);
after_count = bounding_boxes[i].size();
for (int j = 0; j < after_count; j++)
{
ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
bbox.row1 = round(bbox.row1 *cur_scale_y);
bbox.col1 = round(bbox.col1 *cur_scale_x);
bbox.row2 = round(bbox.row2 *cur_scale_y);
bbox.col2 = round(bbox.col2 *cur_scale_x);
bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
}
double t14 = omp_get_wtime();
if (show_debug_info)
printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
}
}
std::vector<ZQ_CNN_OrderScore> firstOrderScore;
int count = 0;
for (int i = 0; i < scales.size(); i++)
{
std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin();
for (; it != bounding_boxes[i].end(); it++)
{
if ((*it).exist)
{
firstBbox.push_back(*it);
order.score = (*it).score;
order.oriOrder = count;
firstOrderScore.push_back(order);
count++;
}
}
}
//the first stage's nms
if (count < 1) return false;
double t15 = omp_get_wtime();
ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1);
ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height, true);
double t16 = omp_get_wtime();
if (show_debug_info)
printf("nms cost: %.3f ms\n", 1000 * (t16 - t15));
if (show_debug_info)
printf("first stage candidate count: %d\n", count);
double t3 = omp_get_wtime();
if (show_debug_info)
printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t1));
return true;
}
bool _Rnet_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox)
{
double t3 = omp_get_wtime();
secondBbox.clear();
std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin();
std::vector<ZQ_CNN_OrderScore> secondScore;
std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
int r_count = 0;
for (; it != firstBbox.end(); it++)
{
if ((*it).exist)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
{
(*it).exist = false;
continue;
}
else
{
src_off_x.push_back(off_x);
src_off_y.push_back(off_y);
src_rect_w.push_back(rect_w);
src_rect_h.push_back(rect_h);
r_count++;
secondBbox.push_back(*it);
}
}
}
int batch_size = BATCH_SIZE;
int per_num = ceil((float)r_count / thread_num);
int need_thread_num = thread_num;
if (per_num > batch_size)
{
need_thread_num = ceil((float)r_count / batch_size);
per_num = batch_size;
}
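// Split the candidate crops into batches of at most BATCH_SIZE each, so a
// single forward pass never grows unboundedly; need_thread_num is the
// resulting batch count (which may exceed thread_num).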
std::vector<ZQ_CNN_Tensor4D_Interface> task_rnet_images(need_thread_num);
std::vector<std::vector<int> > task_src_off_x(need_thread_num);
std::vector<std::vector<int> > task_src_off_y(need_thread_num);
std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num);
for (int i = 0; i < need_thread_num; i++)
{
int st_id = per_num*i;
int end_id = __min(r_count, per_num*(i + 1));
int cur_num = end_id - st_id;
if (cur_num > 0)
{
task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num);
task_src_rect_h[i].resize(cur_num);
task_secondBbox[i].resize(cur_num);
for (int j = 0; j < cur_num; j++)
{
task_src_off_x[i][j] = src_off_x[st_id + j];
task_src_off_y[i][j] = src_off_y[st_id + j];
task_src_rect_w[i][j] = src_rect_w[st_id + j];
task_src_rect_h[i][j] = src_rect_h[st_id + j];
task_secondBbox[i][j] = secondBbox[st_id + j];
}
}
}
if (thread_num <= 1)
{
for (int pp = 0; pp < need_thread_num; pp++)
{
if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0)
continue;
if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
rnet[0].Forward(task_rnet_images[pp]);
const ZQ_CNN_Tensor4D_Interface_Base* score = rnet[0].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D_Interface_Base* location = rnet[0].GetBlobByName("conv5-2");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int task_count = 0;
for (int i = 0; i < task_secondBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[1])
{
for (int j = 0; j < 4; j++)
task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_secondBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_secondBbox[pp].clear();
continue;
}
for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_secondBbox[pp][i].exist)
task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
for (int pp = 0; pp < need_thread_num; pp++)
{
int thread_id = omp_get_thread_num();
if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0)
continue;
if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
rnet[thread_id].Forward(task_rnet_images[pp]);
const ZQ_CNN_Tensor4D_Interface_Base* score = rnet[thread_id].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D_Interface_Base* location = rnet[thread_id].GetBlobByName("conv5-2");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int task_count = 0;
for (int i = 0; i < task_secondBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[1])
{
for (int j = 0; j < 4; j++)
task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_secondBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_secondBbox[pp].clear();
continue;
}
for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_secondBbox[pp][i].exist)
task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
}
}
}
int count = 0;
for (int i = 0; i < need_thread_num; i++)
{
count += task_secondBbox[i].size();
}
secondBbox.resize(count);
secondScore.resize(count);
int id = 0;
for (int i = 0; i < need_thread_num; i++)
{
for (int j = 0; j < task_secondBbox[i].size(); j++)
{
secondBbox[id] = task_secondBbox[i][j];
secondScore[id].score = secondBbox[id].score;
secondScore[id].oriOrder = id;
id++;
}
}
//ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union");
ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min");
ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true);
count = secondBbox.size();
double t4 = omp_get_wtime();
if (show_debug_info)
printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count);
if (show_debug_info)
printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3));
return true;
}
bool _Onet_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox)
{
double t4 = omp_get_wtime();
thirdBbox.clear();
std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin();
std::vector<ZQ_CNN_OrderScore> thirdScore;
std::vector<ZQ_CNN_BBox> early_accept_thirdBbox;
std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
int o_count = 0;
for (; it != secondBbox.end(); it++)
{
if ((*it).exist)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
{
(*it).exist = false;
continue;
}
else
{
if (!do_landmark && it->score > early_accept_thresh)
{
early_accept_thirdBbox.push_back(*it);
}
else
{
src_off_x.push_back(off_x);
src_off_y.push_back(off_y);
src_rect_w.push_back(rect_w);
src_rect_h.push_back(rect_h);
o_count++;
thirdBbox.push_back(*it);
}
}
}
}
int batch_size = BATCH_SIZE;
int per_num = ceil((float)o_count / thread_num);
int need_thread_num = thread_num;
if (per_num > batch_size)
{
need_thread_num = ceil((float)o_count / batch_size);
per_num = batch_size;
}
std::vector<ZQ_CNN_Tensor4D_Interface> task_onet_images(need_thread_num);
std::vector<std::vector<int> > task_src_off_x(need_thread_num);
std::vector<std::vector<int> > task_src_off_y(need_thread_num);
std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num);
for (int i = 0; i < need_thread_num; i++)
{
int st_id = per_num*i;
int end_id = __min(o_count, per_num*(i + 1));
int cur_num = end_id - st_id;
if (cur_num > 0)
{
task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num);
task_src_rect_h[i].resize(cur_num);
task_thirdBbox[i].resize(cur_num);
for (int j = 0; j < cur_num; j++)
{
task_src_off_x[i][j] = src_off_x[st_id + j];
task_src_off_y[i][j] = src_off_y[st_id + j];
task_src_rect_w[i][j] = src_rect_w[st_id + j];
task_src_rect_h[i][j] = src_rect_h[st_id + j];
task_thirdBbox[i][j] = thirdBbox[st_id + j];
}
}
}
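// Serial path: run every batch through network instance 0. The parallel
// path below dispatches batches across the per-thread onet copies.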
if (thread_num <= 1)
{
for (int pp = 0; pp < need_thread_num; pp++)
{
if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0)
continue;
if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
onet[0].Forward(task_onet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D_Interface_Base* score = onet[0].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D_Interface_Base* location = onet[0].GetBlobByName("conv6-2");
const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = onet[0].GetBlobByName("conv6-3");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
const float* keyPoint_ptr = 0;
if (keyPoint != 0)
keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int keyPoint_sliceStep = 0;
if (keyPoint != 0)
keyPoint_sliceStep = keyPoint->GetSliceStep();
int task_count = 0;
ZQ_CNN_OrderScore order;
for (int i = 0; i < task_thirdBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[2])
{
for (int j = 0; j < 4; j++)
task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
if (keyPoint != 0)
{
for (int num = 0; num < 5; num++)
{
task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
(task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
(task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
}
}
task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_thirdBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_thirdBbox[pp].clear();
continue;
}
for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_thirdBbox[pp][i].exist)
task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
for (int pp = 0; pp < need_thread_num; pp++)
{
int thread_id = omp_get_thread_num();
if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0)
continue;
if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
onet[thread_id].Forward(task_onet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D_Interface_Base* score = onet[thread_id].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D_Interface_Base* location = onet[thread_id].GetBlobByName("conv6-2");
const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = onet[thread_id].GetBlobByName("conv6-3");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
const float* keyPoint_ptr = 0;
if (keyPoint != 0)
keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int keyPoint_sliceStep = 0;
if (keyPoint != 0)
keyPoint_sliceStep = keyPoint->GetSliceStep();
int task_count = 0;
ZQ_CNN_OrderScore order;
for (int i = 0; i < task_thirdBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[2])
{
for (int j = 0; j < 4; j++)
task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
if (keyPoint != 0)
{
for (int num = 0; num < 5; num++)
{
task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
(task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
(task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
}
}
task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_thirdBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_thirdBbox[pp].clear();
continue;
}
for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_thirdBbox[pp][i].exist)
task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
}
}
}
int count = 0;
for (int i = 0; i < need_thread_num; i++)
{
count += task_thirdBbox[i].size();
}
thirdBbox.resize(count);
thirdScore.resize(count);
int id = 0;
for (int i = 0; i < need_thread_num; i++)
{
for (int j = 0; j < task_thirdBbox[i].size(); j++)
{
thirdBbox[id] = task_thirdBbox[i][j];
thirdScore[id].score = task_thirdBbox[i][j].score;
thirdScore[id].oriOrder = id;
id++;
}
}
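// Map the regression offsets back to image coordinates, append the
// early-accepted boxes with fresh order indices, then run the final NMS.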
ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false);
ZQ_CNN_OrderScore order;
for (int i = 0; i < early_accept_thirdBbox.size(); i++)
{
order.score = early_accept_thirdBbox[i].score;
order.oriOrder = count++;
thirdScore.push_back(order);
thirdBbox.push_back(early_accept_thirdBbox[i]);
}
ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min");
double t5 = omp_get_wtime();
if (show_debug_info)
printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count);
if (show_debug_info)
printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4));
return true;
}
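// Stage 4 (Lnet): square the Onet boxes, crop/resize each to lnet_size and
// regress refined 5-point landmarks relative to the squared box.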
bool _Lnet_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox)
{
double t4 = omp_get_wtime();
fourthBbox.clear();
std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin();
std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
int l_count = 0;
for (; it != thirdBbox.end(); it++)
{
if ((*it).exist)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
{
(*it).exist = false;
continue;
}
else
{
l_count++;
fourthBbox.push_back(*it);
}
}
}
std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox;
ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height);
for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
src_off_x.push_back(off_x);
src_off_y.push_back(off_y);
src_rect_w.push_back(rect_w);
src_rect_h.push_back(rect_h);
}
int batch_size = BATCH_SIZE;
int per_num = ceil((float)l_count / thread_num);
int need_thread_num = thread_num;
if (per_num > batch_size)
{
need_thread_num = ceil((float)l_count / batch_size);
per_num = batch_size;
}
std::vector<ZQ_CNN_Tensor4D_Interface> task_lnet_images(need_thread_num);
std::vector<std::vector<int> > task_src_off_x(need_thread_num);
std::vector<std::vector<int> > task_src_off_y(need_thread_num);
std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num);
for (int i = 0; i < need_thread_num; i++)
{
int st_id = per_num*i;
int end_id = __min(l_count, per_num*(i + 1));
int cur_num = end_id - st_id;
if (cur_num > 0)
{
task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num);
task_src_rect_h[i].resize(cur_num);
task_fourthBbox[i].resize(cur_num);
for (int j = 0; j < cur_num; j++)
{
task_src_off_x[i][j] = src_off_x[st_id + j];
task_src_off_y[i][j] = src_off_y[st_id + j];
task_src_rect_w[i][j] = src_rect_w[st_id + j];
task_src_rect_h[i][j] = src_rect_h[st_id + j];
task_fourthBbox[i][j] = copy_fourthBbox[st_id + j];
}
}
}
if (thread_num <= 1)
{
for (int pp = 0; pp < need_thread_num; pp++)
{
if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0)
continue;
if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
lnet[0].Forward(task_lnet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[0].GetBlobByName("conv6-3");
const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int keyPoint_sliceStep = keyPoint->GetSliceStep();
for (int i = 0; i < task_fourthBbox[pp].size(); i++)
{
for (int num = 0; num < 5; num++)
{
task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 +
(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 +
(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
}
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
for (int pp = 0; pp < need_thread_num; pp++)
{
int thread_id = omp_get_thread_num();
if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0)
continue;
if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
lnet[thread_id].Forward(task_lnet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[thread_id].GetBlobByName("conv6-3");
const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int keyPoint_sliceStep = keyPoint->GetSliceStep();
for (int i = 0; i < task_fourthBbox[pp].size(); i++)
{
for (int num = 0; num < 5; num++)
{
task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 +
(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 +
(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
}
}
}
}
int count = 0;
for (int i = 0; i < need_thread_num; i++)
{
count += task_fourthBbox[i].size();
}
fourthBbox.resize(count);
int id = 0;
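// Copy back only the landmarks; the box geometry keeps the un-squared
// coordinates collected before _square_bbox.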
for (int i = 0; i < need_thread_num; i++)
{
for (int j = 0; j < task_fourthBbox[i].size(); j++)
{
memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10);
id++;
}
}
double t5 = omp_get_wtime();
if (show_debug_info)
printf("run Lnet [%d] times \n", l_count);
if (show_debug_info)
printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4));
return true;
}
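// Stage 4 variant (Lnet106): same batching as _Lnet_stage, but the landmark
// blob carries GetC()/2 points (nominally 106) stored as (x, y) pairs.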
bool _Lnet106_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox)
{
double t4 = omp_get_wtime();
std::vector<ZQ_CNN_BBox> fourthBbox;
std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin();
std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
int l_count = 0;
for (; it != thirdBbox.end(); it++)
{
if ((*it).exist)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
{
(*it).exist = false;
continue;
}
else
{
l_count++;
fourthBbox.push_back(*it);
}
}
}
std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox;
ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height);
for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
src_off_x.push_back(off_x);
src_off_y.push_back(off_y);
src_rect_w.push_back(rect_w);
src_rect_h.push_back(rect_h);
}
int batch_size = BATCH_SIZE;
int per_num = ceil((float)l_count / thread_num);
int need_thread_num = thread_num;
if (per_num > batch_size)
{
need_thread_num = ceil((float)l_count / batch_size);
per_num = batch_size;
}
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num);
std::vector<std::vector<int> > task_src_off_x(need_thread_num);
std::vector<std::vector<int> > task_src_off_y(need_thread_num);
std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num);
for (int i = 0; i < need_thread_num; i++)
{
int st_id = per_num*i;
int end_id = __min(l_count, per_num*(i + 1));
int cur_num = end_id - st_id;
if (cur_num > 0)
{
task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num);
task_src_rect_h[i].resize(cur_num);
task_fourthBbox[i].resize(cur_num);
for (int j = 0; j < cur_num; j++)
{
task_src_off_x[i][j] = src_off_x[st_id + j];
task_src_off_y[i][j] = src_off_y[st_id + j];
task_src_rect_w[i][j] = src_rect_w[st_id + j];
task_src_rect_h[i][j] = src_rect_h[st_id + j];
task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1;
task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2;
task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1;
task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2;
task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area;
task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score;
task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist;
}
}
}
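// Pre-fill the results with box geometry and scores; the landmark arrays
// are copied in after the network has run.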
resultBbox.resize(l_count);
for (int i = 0; i < l_count; i++)
{
resultBbox[i].col1 = fourthBbox[i].col1;
resultBbox[i].col2 = fourthBbox[i].col2;
resultBbox[i].row1 = fourthBbox[i].row1;
resultBbox[i].row2 = fourthBbox[i].row2;
resultBbox[i].score = fourthBbox[i].score;
resultBbox[i].exist = fourthBbox[i].exist;
resultBbox[i].area = fourthBbox[i].area;
}
if (thread_num <= 1)
{
for (int pp = 0; pp < need_thread_num; pp++)
{
if (task_src_off_x[pp].size() == 0)
continue;
if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
lnet[0].Forward(task_lnet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[0].GetBlobByName("conv6-3");
const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int keypoint_num = keyPoint->GetC() / 2;
int keyPoint_sliceStep = keyPoint->GetSliceStep();
for (int i = 0; i < task_fourthBbox[pp].size(); i++)
{
for (int num = 0; num < keypoint_num; num++)
{
if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104))
{
// damp the regression output for these landmark groups by 0.5 so the
// serial path matches the parallel path below
task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2] * 0.5;
task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1] * 0.5;
}
}
else
{
task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2];
task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1];
}
}
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num)
for (int pp = 0; pp < need_thread_num; pp++)
{
int thread_id = omp_get_thread_num();
if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0)
continue;
if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
lnet[thread_id].Forward(task_lnet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[thread_id].GetBlobByName("conv6-3");
const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int keypoint_num = keyPoint->GetC() / 2;
int keyPoint_sliceStep = keyPoint->GetSliceStep();
for (int i = 0; i < task_fourthBbox[pp].size(); i++)
{
for (int num = 0; num < keypoint_num; num++)
{
if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104))
{
task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2] * 0.5;
task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1] * 0.5;
}
else
{
task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2];
task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1];
}
}
}
}
}
int count = 0;
for (int i = 0; i < need_thread_num; i++)
{
count += task_fourthBbox[i].size();
}
resultBbox.resize(count);
int id = 0;
for (int i = 0; i < need_thread_num; i++)
{
for (int j = 0; j < task_fourthBbox[i].size(); j++)
{
memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212);
id++;
}
}
double t5 = omp_get_wtime();
if (show_debug_info)
printf("run Lnet [%d] times \n", l_count);
if (show_debug_info)
printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4));
return true;
}
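// Truncate the candidate list to at most limit_num boxes; the width/height
// parameters are currently unused.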
void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height)
{
int in_num = bbox.size();
if (limit_num >= in_num)
return;
bbox.resize(limit_num);
}
};
}
#endif
|
parallel.h | #pragma once
namespace pbbs {
//***************************************
// All the pbbs library uses only four functions for
// accessing parallelism.
// These can be implemented on top of any scheduler.
//***************************************
// number of threads available from OS
// template <>
int num_workers();
// id of the running thread, numbered from [0 ... num_workers())
int worker_id();
void set_num_workers(int n);
#ifdef SAGE
static int numanode();
#endif
// the granularity of a simple loop (e.g. adding one to each element
// of an array) needed to reasonably hide the cost of the scheduler
// #define PAR_GRANULARITY 2000
// parallel loop from start (inclusive) to end (exclusive) running
// function f.
// f should map long to void.
// granularity is the number of iterations to run sequentially
// if 0 (default) then the scheduler will decide
// conservative uses a safer scheduler
template <typename F>
static void parallel_for(long start, long end, F f, long granularity = 0,
bool conservative = false);
// runs the thunks left and right in parallel.
// both left and right should map void to void
// conservative uses a safer scheduler
template <typename Lf, typename Rf>
static void par_do(Lf left, Rf right, bool conservative = false);
template <typename A, typename Af, typename Df, typename F>
static void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
long end, F f, long granularity = 0,
bool conservative = false);
} // namespace pbbs
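// Illustrative usage sketch (editorial addition, not part of the original
// header): how the primitives declared above compose. PBBS_USAGE_EXAMPLE is
// a hypothetical guard so the sketch never affects real builds.
#ifdef PBBS_USAGE_EXAMPLE
#include <vector>
inline long pbbs_example_dot(const std::vector<long> &a,
const std::vector<long> &b) {
// one accumulator slot per worker avoids a data race on a shared sum
std::vector<long> partial(pbbs::num_workers(), 0);
pbbs::parallel_for(0, (long)a.size(),
[&](long i) { partial[pbbs::worker_id()] += a[i] * b[i]; });
// fork-join reduction of the per-worker partial sums
long lo = 0, hi = 0;
int mid = (int)partial.size() / 2;
pbbs::par_do([&] { for (int i = 0; i < mid; i++) lo += partial[i]; },
[&] { for (int i = mid; i < (int)partial.size(); i++) hi += partial[i]; });
return lo + hi;
}
#endif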
//***************************************
// cilkplus
#if defined(CILK)
#include <cilk/cilk.h>
#include <cilk/cilk_api.h>
#include <cilk/reducer.h>
#include <iostream>
#include <sstream>
#define PAR_GRANULARITY 2000
namespace pbbs {
template <typename F>
inline void parallel_for(long start, long end, F f, long granularity,
bool conservative) {
if (granularity == 0)
cilk_for(long i = start; i < end; i++) f(i);
else if ((end - start) <= granularity)
for (long i = start; i < end; i++)
f(i);
else {
long n = end - start;
long mid = (start + (9 * (n + 1)) / 16);
cilk_spawn parallel_for(start, mid, f, granularity, conservative);
parallel_for(mid, end, f, granularity, conservative);
cilk_sync;
}
}
template <typename F>
inline void parallel_for_1(long start, long end, F f, long granularity,
bool conservative) {
_Pragma("cilk grainsize = 1") cilk_for(long i = start; i < end; i++) f(i);
}
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
cilk_spawn right();
left();
cilk_sync;
}
template <typename A> class alloc_holder {
struct Monoid : cilk::monoid_base<A> {
static void reduce(A *left, A *right) {}
};
public:
cilk::reducer<Monoid> imp_;
alloc_holder() : imp_() {}
};
// TODO try parallel_for_1
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
long end, F f, long granularity,
bool conservative) {
alloc_holder<A> alloc;
parallel_for_1(
start, end,
[&](size_t i) {
init_alloc(&alloc.imp_.view());
f(i, &(alloc.imp_.view()));
// finish_alloc(&(alloc.imp_.view()));
},
granularity, conservative);
}
inline int num_workers() { return __cilkrts_get_nworkers(); }
inline int worker_id() { return __cilkrts_get_worker_number(); }
#ifdef SAGE
inline int numanode() {
std::cout << "numanode() only supported with homegrown scheduler"
<< std::endl;
exit(-1);
return 1;
}
#endif
} // namespace pbbs
// openmp
#elif defined(OPENMP)
#include <omp.h>
#include <stddef.h>
#define PAR_GRANULARITY 200000
namespace pbbs {
extern bool in_par_do;
template <class F>
inline void parallel_for(long start, long end, F f, long granularity,
bool conservative) {
_Pragma("omp parallel for") for (long i = start; i < end; i++) f(i);
}
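// note: parallel_for_1 uses an orphaned `omp for`, so it only parallelizes
// when called from inside an enclosing `omp parallel` region (as
// parallel_for_alloc does below); granularity and conservative are ignored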
template <typename F>
inline void parallel_for_1(long start, long end, F f, long granularity,
bool conservative) {
#pragma omp for schedule(dynamic, 1) nowait
for (long i = start; i < end; i++)
f(i);
}
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
if (!in_par_do) {
in_par_do = true; // at top level start up tasking
#pragma omp parallel
#pragma omp single
{
// the braces are required: without them only left() sits inside the
// single region and right() would run sequentially after it
#pragma omp task
left();
#pragma omp task
right();
#pragma omp taskwait
}
in_par_do = false;
} else { // already started; wait for both tasks before returning
#pragma omp task
left();
#pragma omp task
right();
#pragma omp taskwait
}
}
template <typename Job> inline void parallel_run(Job job, int num_threads = 0) {
job();
}
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
long end, F f, long granularity,
bool conservative) {
A *alloc = nullptr;
#pragma omp parallel private(alloc)
{
alloc = new A();
init_alloc(alloc);
parallel_for_1(
start, end, [&](size_t i) { f(i, alloc); }, granularity,
conservative);
//#pragma omp for schedule(dynamic, 1) nowait
// for(long i=start; i<end; i++) f(i, alloc);
finish_alloc(alloc);
}
}
inline int num_workers() { return omp_get_max_threads(); }
inline int worker_id() { return omp_get_thread_num(); }
#ifdef SAGE
inline int numanode() {
std::cout << "numanode() only supported with homegrown scheduler"
<< std::endl;
exit(-1);
return 1;
}
#endif
} // namespace pbbs
// Guy's scheduler (ABP)
#elif defined(HOMEGROWN)
#include "scheduler.h"
#define PAR_GRANULARITY 512
namespace pbbs {
template <class F>
inline void parallel_for(long start, long end, F f, long granularity,
bool conservative) {
pbbs::global_scheduler.parfor(start, end, f, granularity, conservative);
}
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
return pbbs::global_scheduler.pardo(left, right, conservative);
}
template <typename Job> inline void parallel_run(Job job, int num_threads = 0) {
job();
}
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
long end, F f, long granularity,
bool conservative) {
parallel_for(
start, end,
[&](long i) {
static thread_local A *alloc = new A();
init_alloc(alloc);
f(i, alloc);
},
granularity, conservative);
// finish_alloc(alloc);
}
inline int num_workers() { return pbbs::global_scheduler.num_workers(); }
inline int worker_id() { return pbbs::global_scheduler.worker_id(); }
#ifdef SAGE
inline int numanode() { return pbbs::global_scheduler.numanode(); }
#endif
} // namespace pbbs
// c++
#else
#define PAR_GRANULARITY 1000
namespace pbbs {
template <class F>
inline void parallel_for(long start, long end, F f, long granularity,
bool conservative) {
for (long i = start; i < end; i++) {
f(i);
}
}
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
left();
right();
}
template <typename Job> inline void parallel_run(Job job, int num_threads = 0) {
job();
}
template <typename A, typename Af, typename Df, typename F>
inline void parallel_for_alloc(Af init_alloc, Df finish_alloc, long start,
long end, F f, long granularity,
bool conservative) {
A *alloc = new A();
init_alloc(alloc);
for (long i = start; i < end; i++) {
f(i, alloc);
}
finish_alloc(alloc);
}
inline int num_workers() { return 1; }
inline int worker_id() { return 0; }
} // namespace pbbs
#endif
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
struct OMPTraitProperty;
struct OMPTraitSelector;
struct OMPTraitSet;
class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
/// PrevTokLocation - The location of the token we previously
/// consumed. This token is used for diagnostics where we expected to
/// see a token following another token (e.g., the ';' at the end of
/// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFenvAccessHandler;
std::unique_ptr<PragmaHandler> STDCFenvRoundHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Set to true after calling ProduceSignatureHelp; this is a workaround to
/// make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// Current kind of OpenMP clause
OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
void MaybeDestroyTemplateIds() {
if (!TemplateIds.empty() &&
(Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
DestroyTemplateIds();
}
void DestroyTemplateIds();
/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
Parser &Self;
DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space before the '<' token
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
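// Stash the lookahead, push Consumed back and re-lex it into Tok, then
// re-inject the stashed lookahead so the next Lex returns it.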
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();
/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static TypeResult getTypeAnnotation(const Token &Tok) {
if (!Tok.getAnnotationValue())
return TypeError();
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, TypeResult T) {
assert((T.isInvalid() || T.get()) &&
"produced a valid-but-null type annotation?");
Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
Tok.setAnnotationValue(ND);
}
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
bool MightBeCXXScopeToken() {
return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
(Tok.is(tok::annot_template_id) &&
NextToken().is(tok::coloncolon)) ||
Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser &p)
: P(p), PrevPreferredType(P.PreferredType) {
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
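// A minimal usage sketch (hypothetical helper, illustrative only): any
// tokens consumed during the tentative parse are rewound automatically
// when the action goes out of scope.
//
//   bool looksLikeDeclaration(Parser &P) {
//     RevertingTentativeParsingAction TPA(P);
//     P.ConsumeToken();
//     // ... inspect further tokens to make a decision ...
//     return false;
//   } // TPA's destructor calls Revert() here.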
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
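// A minimal usage sketch (illustrative): temporarily step out of an
// Objective-C container while parsing declarations that belong to the
// enclosing context; the destructor re-enters the container, if any.
//
//   {
//     ObjCDeclContextSwitch ObjCDC(*this);
//     // ... parse declarations in the enclosing decl context ...
//   }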
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
/// A '(' '{' beginning a statement-expression.
StmtExprBegin,
/// A '}' ')' ending a statement-expression.
StmtExprEnd,
/// A '[' '[' beginning a C++11 or C2x attribute.
AttrBegin,
/// A ']' ']' ending a C++11 or C2x attribute.
AttrEnd,
/// A '::' '*' forming a C++ pointer-to-member declaration.
MemberPtr,
};
/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
tok::TokenKind FirstTokKind, CompoundToken Op);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
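// A minimal usage sketch (the scope flag is illustrative): tie a scope's
// lifetime to a parsing function, or end it early with Exit().
//
//   ParseScope BodyScope(this, Scope::DeclScope);
//   // ... parse the construct that owns the scope ...
//   BodyScope.Exit(); // optional; the destructor would also exit the scope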
/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
Parser &Self;
unsigned NumScopes = 0;
MultiParseScope(const MultiParseScope&) = delete;
public:
MultiParseScope(Parser &Self) : Self(Self) {}
void Enter(unsigned ScopeFlags) {
Self.EnterScope(ScopeFlags);
++NumScopes;
}
void Exit() {
while (NumScopes) {
Self.ExitScope();
--NumScopes;
}
}
~MultiParseScope() {
Exit();
}
};
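// A minimal usage sketch (the flag and loop bound are illustrative): enter
// a variable number of scopes and have them all exited together.
//
//   MultiParseScope TemplateParamScopes(*this);
//   for (unsigned I = 0; I != NumParamLists; ++I)
//     TemplateParamScopes.Enter(Scope::TemplateParamScope);
//   // ... parse the templated declaration ...
//   // all entered scopes are exited by Exit() or by the destructor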
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
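// A minimal error-recovery sketch (hypothetical call site): skip ahead to
// the closing ')' without consuming it, stopping early at a ';', with the
// flags combined via the operator| defined above.
//
//   SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);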
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker active on an else, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser *Self;
/// Method - The method declaration.
Decl *Method;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - While parsing a top-level (non-nested)
/// C++ class, method declarations that contain parts which won't be parsed
/// until after the definition is completed (C++ [class.mem]p2) are stored
/// here, along with any attached inline definitions and the cached tokens
/// that will later be parsed to create those entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), IsInterface(IsInterface),
TagOrTemplate(TagOrTemplate) {}
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
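// A minimal usage sketch (TagDecl is an illustrative Decl*): push a class
// onto the parsing-class stack for the duration of its definition.
//
//   ParsingClassDefinition ParsingDef(*this, TagDecl, /*TopLevelClass=*/true,
//                                     /*IsInterface=*/false);
//   // ... parse the member-specification ...
//   ParsingDef.Pop(); // or let the destructor pop it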
/// Contains any template-specific information that has been
/// parsed prior to parsing the declaration specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation.
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseUniqueStableNameExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
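// For illustration, hypothetical Objective-C++ fragments where '[' must be
// disambiguated:
//
//   [&x](int y) { return x + y; }  // lambda-introducer: Success
//   [obj doSomethingWith:arg]      // Objective-C message send: MessageSend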
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr,
bool EnterForConditionScope = false);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
struct DesignatorCompletionInfo {
SmallVectorImpl<Expr *> &InitExprs;
QualType PreferredBaseType;
};
ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only
/// size used).
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used).
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc, Sema::ConditionKind CK,
SourceLocation *LParenLoc = nullptr,
SourceLocation *RParenLoc = nullptr);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the action that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior that this __if_exists or __if_not_exists block
/// should follow.
IfExistsBehavior Behavior;
};
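// For illustration, a hypothetical Microsoft-mode fragment that yields such
// a condition (with Behavior deciding how the braced block is treated):
//
//   __if_exists (MyClass::Foo) {
//     // parsed, skipped, or treated as dependent, per IfExistsBehavior
//   }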
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out; there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Whether a defining-type-specifier is permitted in a given context.
enum class AllowDefiningTypeSpec {
/// The grammar doesn't allow a defining-type-specifier here, and we must
/// not parse one (e.g., because a '{' could mean something else).
No,
/// The grammar doesn't allow a defining-type-specifier here, but we permit
/// one for error recovery purposes. Sema will reject.
NoButErrorRecovery,
/// The grammar allows a defining-type-specifier here, even though it's
/// always invalid. Sema will reject.
YesButInvalid,
/// The grammar allows a defining-type-specifier here, and one can be valid.
Yes
};
/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_objc_method_result:
return AllowDefiningTypeSpec::Yes;
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_template_param:
return AllowDefiningTypeSpec::YesButInvalid;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
return AllowDefiningTypeSpec::NoButErrorRecovery;
case DeclSpecContext::DSC_trailing:
return AllowDefiningTypeSpec::No;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
return true;
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
RecordDecl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier that is not part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or if we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as an expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context = DeclaratorContext::TypeName,
AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!standardAttributesAllowed())
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we do not support yet, for example,
// attributes that appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID,
bool DiagnoseEmptyAttrs = false);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
enum ParseAttrKindMask {
PAKM_GNU = 1 << 0,
PAKM_Declspec = 1 << 1,
PAKM_CXX11 = 1 << 2,
};
/// \brief Parse attributes based on what syntaxes are desired, allowing for
/// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
/// __attribute__((...)) __declspec(...) __attribute__((...))
/// Note that Microsoft attributes (spelled with single square brackets) are
/// not supported by this because of parsing ambiguities with other
/// constructs.
///
/// There are some attribute parse orderings that should not be allowed in
/// arbitrary order. e.g.,
///
/// [[]] __attribute__(()) int i; // OK
/// __attribute__(()) [[]] int i; // Not OK
///
/// Such situations should use the specific attribute parsing functionality.
void ParseAttributes(unsigned WhichAttrKinds,
ParsedAttributesWithRange &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr);
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
ParsedAttributesWithRange AttrsWithRange(AttrFactory);
ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs);
Attrs.takeAllFrom(AttrsWithRange);
}
/// \brief Possibly parse attributes based on what syntaxes are desired,
/// allowing for the order to vary.
bool MaybeParseAttributes(unsigned WhichAttrKinds,
ParsedAttributesWithRange &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
(standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
return true;
}
return false;
}
bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
(standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
return true;
}
return false;
}
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributesWithRange AttrsWithRange(AttrFactory);
ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs);
Attrs.takeAllFrom(AttrsWithRange);
return true;
}
return false;
}
bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
return true;
}
return false;
}
/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
void ParseGNUAttributes(ParsedAttributes &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr) {
ParsedAttributesWithRange AttrsWithRange(AttrFactory);
ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs, D);
Attrs.takeAllFrom(AttrsWithRange);
}
void ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
return true;
}
return false;
}
bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) {
ParseCXX11Attributes(attrs, endLoc);
return true;
}
return false;
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) {
ParseMicrosoftDeclSpecs(Attrs, End);
return true;
}
return false;
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
OMPTraitInfo *ParentTI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse 'omp [begin] assume[s]' directive.
void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parse 'omp end assumes' directive.
void ParseOpenMPEndAssumesDirective(SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Skip tokens until an `annot_pragma_openmp_end` is found. Emit a warning if
/// it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
/// Check \p FoundKind against \p ExpectedKind; if they do not match, issue an
/// error that the "end" matching the "begin" directive of kind \p BeginKind
/// was not found. Finally, if the expected kind was found or if
/// \p SkipUntilOpenMPEnd is set, skip ahead using the helper
/// `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
OpenMPDirectiveKind ExpectedKind,
OpenMPDirectiveKind FoundKind,
SourceLocation MatchingLoc,
SourceLocation FoundLoc,
bool SkipUntilOpenMPEnd);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses the 'sizes' clause of a '#pragma omp tile' directive.
OMPClause *ParseOpenMPSizesClause();
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
/// Parses and creates OpenMP 5.0 iterators expression:
/// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
/// <range-specification> }+ ')'
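/// For example, 'iterator(int it = 0:n)' is one instance of this grammar
/// (an illustrative example, not an exhaustive description).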
ExprResult ParseOpenMPIteratorsExpr();
/// Parses allocators and traits in the context of the uses_allocator clause.
/// Expected format:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
/// Parses clause with an interop variable of kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *DepModOrTailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
MapTypeModifiersLoc;
SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
MotionModifiers;
SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
bool IsMapTypeImplicit = false;
SourceLocation ExtraModifierLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
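/// For example, 'map(always, close, tofrom : list)' carries two
/// map-type-modifiers (an illustrative instance of the grammar above).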
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
class GNUAsmQualifiers {
unsigned Qualifiers = AQ_unspecified;
public:
enum AQ {
AQ_unspecified = 0,
AQ_volatile = 1,
AQ_inline = 2,
AQ_goto = 4,
};
static const char *getQualifierName(AQ Qualifier);
bool setAsmQualifier(AQ Qualifier);
inline bool isVolatile() const { return Qualifiers & AQ_volatile; }
inline bool isInline() const { return Qualifiers & AQ_inline; }
inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
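// For example (illustrative), parsing 'asm inline volatile goto (...)' would
// set AQ_inline, AQ_volatile and AQ_goto on a GNUAsmQualifiers instance.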
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
target_x86.h | /*****************************************************************************
*
* target_x86.h
*
* Edinburgh Soft Matter and Statistical Physics Group and
* Edinburgh Parallel Computing Centre
*
* (c) 2018 The University of Edinburgh
*
* Contributing authors:
* Alan Gray (alang@epcc.ed.ac.uk)
* Kevin Stratford (kevin@epcc.ed.ac.uk)
*
*****************************************************************************/
#ifndef LUDWIG_TARGET_X86_H
#define LUDWIG_TARGET_X86_H
typedef enum tdpFuncCache_enum {
tdpFuncCachePreferNone = 0,
tdpFuncCachePreferShared = 1,
tdpFuncCachePreferL1 = 2,
tdpFuncCachePreferEqual = 3}
tdpFuncCache;
typedef enum tdpMemcpyKind_enum {
tdpMemcpyHostToHost = 0,
tdpMemcpyHostToDevice = 1,
tdpMemcpyDeviceToHost = 2,
tdpMemcpyDeviceToDevice = 3,
tdpMemcpyDefault = 4}
tdpMemcpyKind;
/* Device attributes (potentially a lot of them) */
typedef enum tdpDeviceAttr_enum {
tdpDevAttrMaxThreadsPerBlock = 1,
tdpDevAttrMaxBlockDimX = 2,
tdpDevAttrMaxBlockDimY = 3,
tdpDevAttrMaxBlockDimZ = 4,
tdpDevAttrMaxGridDimX = 5,
tdpDevAttrMaxGridDimY = 6,
tdpDevAttrMaxGridDimZ = 7,
tdpDevAttrManagedMemory = 83
} tdpDeviceAttr;
/* tdpGetLastError() can return... */
enum tdpError {
tdpSuccess = 0,
tdpErrorMissingConfiguration = 1,
tdpErrorMemoryAllocation = 2,
tdpErrorInitializationError = 3,
tdpErrorLaunchFailure = 4,
tdpErrorLaunchTimeout = 6,
tdpErrorLaunchOutOfResources = 7,
tdpErrorInvalidDeviceFunction = 8,
tdpErrorInvalidConfiguration = 9,
tdpErrorInvalidDevice = 10,
tdpErrorInvalidValue = 11,
tdpErrorInvalidPitchValue = 12,
tdpErrorInvalidSymbol = 13,
tdpErrorUnmapBufferObjectFailed = 15,
tdpErrorInvalidHostPointer = 16,
tdpErrorInvalidDevicePointer = 17,
tdpErrorInvalidTexture = 18,
tdpErrorInvalidTextureBinding = 19,
tdpErrorInvalidChannelDescriptor = 20,
tdpErrorInvalidMemcpyDirection = 21,
tdpErrorInvalidFilterSetting = 26,
tdpErrorUnknown = 30,
tdpErrorInvalidResourceHandle = 33,
tdpErrorInsufficientDriver = 35,
tdpErrorSetOnActiveProcess = 36,
tdpErrorInvalidSurface = 37,
tdpErrorNoDevice = 38,
tdpErrorStartupFailure = 0x7f
};
#define tdpHostAllocDefault 0x00
#define tdpHostAllocMapped 0x02
#define tdpHostAllocPortable 0x01
#define tdpHostAllocWriteCombined 0x04
#define tdpMemAttachGlobal 0x01
#define tdpMemAttachHost 0x02
#define tdpMemAttachSingle 0x04
/* Device memory qualifiers / execution space qualifiers */
#define __host__
#define __global__
#define __shared__ static
#define __device__
#define __constant__
#if (__STDC_VERSION__ >= 199901L)
#define __forceinline__
#define __noinline__
#else
#define __forceinline__
#define __noinline__
#endif
/* Built-in variable implementation. */
typedef struct tdp_uint3_s uint3;
typedef struct tdp_dim3_s dim3;
struct tdp_uint3_s {
unsigned int x;
unsigned int y;
unsigned int z;
};
struct tdp_dim3_s {
int x;
int y;
int z;
};
extern dim3 gridDim;
extern dim3 blockDim;
extern dim3 threadIdx;
extern dim3 blockIdx;
#ifdef _OPENMP
/* These names are reserved and must be ... */
#pragma omp threadprivate(gridDim, blockDim, threadIdx, blockIdx)
#endif
typedef enum tdpError tdpError_t; /* an enum type */
typedef int * tdpStream_t; /* an opaque handle */
/* Incomplete. */
struct tdpDeviceProp {
int maxThreadsPerBlock;
int maxThreadsDim[3];
};
#define tdpSymbol(x) &(x)
void tdp_x86_prelaunch(dim3 nblocks, dim3 nthreads);
void tdp_x86_postlaunch(void);
#ifdef _OPENMP
/* Help to expand OpenMP clauses which need to be retained as strings */
#define xstr(a) str(a)
#define str(a) #a
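/* For example, xstr(omp simd reduction(+: sum)) expands to the string
literal "omp simd reduction(+: sum)", which can then be handed to
_Pragma(), as in for_simd_v_reduction() below. */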
/* Have OpenMP */
#include <omp.h>
#define TARGET_MAX_THREADS_PER_BLOCK 256
#define __syncthreads() _Pragma("omp barrier")
/* Kernel launch is a __VA_ARGS__ macro, thus: */
#define tdpLaunchKernel(kernel, nblocks, nthreads, shmem, stream, ...) \
_Pragma("omp parallel") \
{ \
tdp_x86_prelaunch(nblocks, nthreads); \
kernel(__VA_ARGS__); \
tdp_x86_postlaunch(); \
}
/* OpenMP work sharing */
#define for_simt_parallel(index, ndata, stride) \
_Pragma("omp for nowait") \
for (index = 0; index < (ndata); index += (stride))
/* SIMD safe loops */
#define for_simd_v(iv, nsimdvl) \
_Pragma("omp simd") \
for (iv = 0; iv < (nsimdvl); ++iv)
#define for_simd_v_reduction(iv, nsimdvl, clause) \
_Pragma(xstr(omp simd reduction(clause))) \
for (iv = 0; iv < nsimdvl; ++iv)
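/* Illustrative usage sketch ("scale_kernel", "ndata" and "a" are
hypothetical names, not part of this header):

__global__ void scale_kernel(int ndata, double * a) {
int index;
for_simt_parallel(index, ndata, 1) {
a[index] *= 2.0;
}
return;
}

tdpLaunchKernel(scale_kernel, nblocks, nthreads, 0, 0, ndata, a);
*/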
#else /* Not OPENMP */
#define TARGET_MAX_THREADS_PER_BLOCK 1
#define omp_get_num_threads() 1
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#define omp_set_num_threads(n)
#define __syncthreads()
/* NULL implementation */
/* Kernel launch is a __VA_ARGS__ macro, thus: */
#define tdpLaunchKernel(kernel, nblocks, nthreads, shmem, stream, ...) \
tdp_x86_prelaunch(nblocks, nthreads); \
kernel(__VA_ARGS__); \
tdp_x86_postlaunch();
/* "Worksharing" is provided by a loop */
#define for_simt_parallel(index, ndata, stride) \
for (index = 0; index < (ndata); index += (stride))
/* Vectorised loops */
#define for_simd_v(iv, nsimdvl) for (iv = 0; iv < (nsimdvl); iv++)
#define for_simd_v_reduction(iv, nsimdvl, clause) \
for (iv = 0; iv < nsimdvl; iv++)
#endif /* _OPENMP */
#define tdp_get_max_threads() omp_get_max_threads()
#endif
|
DRB110-ordered-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdio.h>
/* This is a program based on a test contributed by Yizi Gu@Rice Univ.
* Proper use of the ordered directive and clause, no data races
* */
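/* The reduction(+:x) clause gives each thread a private copy of x that is
combined after the loop, so the concurrent increments below do not race. */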
int main()
{
int x =0;
#pragma omp parallel for reduction(+:x)
for (int i = 0; i < 100; ++i) {
x++;
}
assert (x==100);
printf ("x=%d\n",x);
return 0;
}
|
pcmemory.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*
* -- SuperLU MT routine (version 2.2) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley,
* and Xerox Palo Alto Research Center.
* September 10, 2007
*
* Last modified:
* -- 8/29/2013: added lock to access Stack memory supplied by user
*
*/
#include "slu_mt_cdefs.h"
/* ------------------
Constants & Macros
------------------ */
#define EXPAND 1.5
#define NO_MEMTYPE 4 /* 0: lusup;
1: ucol;
2: lsub;
3: usub */
#define GluIntArray(n) (9 * (n) + 5)
/* -------------------
Internal prototypes
------------------- */
void *pcgstrf_expand (int_t *, MemType,int_t, int_t, GlobalLU_t *);
void copy_mem_complex (int_t, void *, void *);
void pcgstrf_StackCompress(GlobalLU_t *);
void pcgstrf_SetupSpace (void *, int_t);
void *cuser_malloc (int_t, int_t);
void cuser_free (int_t, int_t);
/* ----------------------------------------------
External prototypes (in memory.c - prec-indep)
---------------------------------------------- */
extern void copy_mem_int (int_t, void *, void *);
extern void user_bcopy (char *, char *, int_t);
typedef struct {
int_t size;
int_t used;
int_t top1; /* grow upward, relative to &array[0] */
int_t top2; /* grow downward */
void *array;
#if ( MACH==PTHREAD )
pthread_mutex_t lock;
#endif
} LU_stack_t;
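/* The stack is allocated from both ends (see cuser_malloc() below):
[0 ... top1) grows upward for HEAD requests, [top2 ... size) grows
downward for TAIL requests, and the stack is full once the two regions
would meet. */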
typedef enum {HEAD, TAIL} stack_end_t;
typedef enum {SYSTEM, USER} LU_space_t;
ExpHeader *cexpanders = 0; /* Array of pointers to 4 types of memory */
static LU_stack_t stack;
static int_t no_expand;
static int_t ndim;
static LU_space_t whichspace; /* 0 - system malloc'd; 1 - user provided */
/* Macros to manipulate stack */
#define StackFull(x) ( x + stack.used >= stack.size )
#define NotDoubleAlign(addr) ( (long long int)addr & 7 )
#define DoubleAlign(addr) ( ((long long int)addr + 7) & ~7L )
#define Reduce(alpha) ((alpha + 1) / 2) /* i.e. (alpha-1)/2 + 1 */
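/* For example (illustrative): DoubleAlign(0x1003) == 0x1008, and
NotDoubleAlign(0x1008) == 0, i.e. addresses are rounded up to the next
multiple of 8 bytes. */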
/* temporary space used by BLAS calls */
#define NUM_TEMPV(n,w,t,b) (SUPERLU_MAX( 2*n, (t + b)*w ))
/*
* Setup the memory model to be used for factorization.
* lwork = 0: use system malloc;
* lwork > 0: use user-supplied work[] space.
*/
void pcgstrf_SetupSpace(void *work, int_t lwork)
{
if ( lwork == 0 ) {
whichspace = SYSTEM; /* malloc/free */
} else if ( lwork > 0 ) {
whichspace = USER; /* user provided space */
stack.size = lwork;
stack.used = 0;
stack.top1 = 0;
stack.top2 = lwork;
stack.array = (void *) work;
}
#if ( MACH==PTHREAD )
pthread_mutex_init ( &stack.lock, NULL);
#endif
}
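/* Illustrative calls (hypothetical arguments):
pcgstrf_SetupSpace(NULL, 0); -- factorization storage via malloc/free
pcgstrf_SetupSpace(work, lwork); -- lwork > 0: carve storage out of work[]
*/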
/*
* Destroy the lock used for user stack memory.
*/
void pcgstrf_StackFree()
{
#if ( MACH==PTHREAD ) /* Use pthread ... */
if ( whichspace == USER )
pthread_mutex_destroy( &stack.lock );
#endif
}
void *cuser_malloc(int_t bytes, int_t which_end)
{
void *buf;
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
if ( StackFull(bytes) ) {
buf = NULL;
goto end;
}
if ( which_end == HEAD ) {
buf = (char*) stack.array + stack.top1;
stack.top1 += bytes;
} else {
stack.top2 -= bytes;
buf = (char*) stack.array + stack.top2;
}
stack.used += bytes;
end: ;
} /* ---- end critical section ---- */
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
return buf;
}
void cuser_free(int_t bytes, int_t which_end)
{
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
if ( which_end == HEAD ) stack.top1 -= bytes;
else stack.top2 += bytes;
stack.used -= bytes;
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
/* Returns the working storage used during factorization */
int_t superlu_cTempSpace(int_t n, int_t w, int_t p)
{
register float tmp, ptmp;
register int_t iword = sizeof(int_t), dword = sizeof(complex);
int_t maxsuper = sp_ienv(3), rowblk = sp_ienv(4);
/* globally shared */
tmp = 14 * n * iword;
/* local to each processor */
ptmp = (2 * w + 5 + NO_MARKER) * n * iword;
ptmp += (n * w + NUM_TEMPV(n,w,maxsuper,rowblk)) * dword;
#if ( PRNTlevel>=1 )
printf("Per-processor work[] %.0f MB\n", ptmp/1024/1024);
#endif
ptmp *= p;
return (tmp + ptmp);
}
/*
* superlu_memusage consists of the following fields:
* o for_lu (float)
* The amount of space used in bytes for L\U data structures.
* o total_needed (float)
* The amount of space needed in bytes to perform factorization.
* o expansions (int)
* The number of memory expansions during the LU factorization.
*/
int_t superlu_cQuerySpace(int_t P, SuperMatrix *L, SuperMatrix *U, int_t panel_size,
superlu_memusage_t *superlu_memusage)
{
SCPformat *Lstore;
NCPformat *Ustore;
register int_t n, iword, dword, lwork;
Lstore = L->Store;
Ustore = U->Store;
n = L->ncol;
iword = sizeof(int_t);
dword = sizeof(complex);
/* L supernodes of type SCP */
superlu_memusage->for_lu = (float) (7*n + 3) * iword
+ (float) Lstore->nzval_colend[n-1] * dword
+ (float) Lstore->rowind_colend[n-1] * iword;
/* U columns of type NCP */
superlu_memusage->for_lu += (2*n + 1) * iword
+ (float) Ustore->colend[n-1] * (dword + iword);
/* Working storage to support factorization */
lwork = superlu_cTempSpace(n, panel_size, P);
superlu_memusage->total_needed = superlu_memusage->for_lu + lwork;
superlu_memusage->expansions = --no_expand;
return 0;
}
float pcgstrf_memory_use(const int_t nzlmax, const int_t nzumax, const int_t nzlumax)
{
register float iword, dword, t;
iword = sizeof(int_t);
dword = sizeof(complex);
t = 10. * ndim * iword + nzlmax * iword + nzumax * (iword + dword)
+ nzlumax * dword;
return t;
}
/*
* Allocate storage for the data structures common to all factor routines.
* For those of unpredictable size, make a guess of FILL * nnz(A).
* Return value:
* If lwork = -1, return the estimated amount of space required;
* otherwise, return the amount of space actually allocated when
* memory allocation failure occurred.
*/
float
pcgstrf_MemInit(int_t n, int_t annz, superlumt_options_t *superlumt_options,
SuperMatrix *L, SuperMatrix *U, GlobalLU_t *Glu)
{
register int_t nprocs = superlumt_options->nprocs;
yes_no_t refact = superlumt_options->refact;
register int_t panel_size = superlumt_options->panel_size;
register int_t lwork = superlumt_options->lwork;
void *work = superlumt_options->work;
int_t iword, dword, retries = 0;
SCPformat *Lstore;
NCPformat *Ustore;
int_t *xsup, *xsup_end, *supno;
int_t *lsub, *xlsub, *xlsub_end;
complex *lusup;
int_t *xlusup, *xlusup_end;
complex *ucol;
int_t *usub, *xusub, *xusub_end;
int_t nzlmax, nzumax, nzlumax;
int_t FILL_LUSUP = sp_ienv(6); /* Guess the fill-in growth for LUSUP */
int_t FILL_UCOL = sp_ienv(7); /* Guess the fill-in growth for UCOL */
int_t FILL_LSUB = sp_ienv(8); /* Guess the fill-in growth for LSUB */
no_expand = 0;
ndim = n;
iword = sizeof(int_t);
dword = sizeof(complex);
if ( !cexpanders )
cexpanders = (ExpHeader *) SUPERLU_MALLOC(NO_MEMTYPE * sizeof(ExpHeader));
if ( refact == NO ) {
/* Guess amount of storage needed by L\U factors. */
if ( FILL_UCOL < 0 ) nzumax = -FILL_UCOL * annz;
else nzumax = FILL_UCOL;
if ( FILL_LSUB < 0 ) nzlmax = -FILL_LSUB * annz;
else nzlmax = FILL_LSUB;
if ( Glu->dynamic_snode_bound == YES ) {
if ( FILL_LUSUP < 0 ) nzlumax = -FILL_LUSUP * annz;
else nzlumax = FILL_LUSUP; /* estimate an upper bound */
} else {
nzlumax = Glu->nzlumax; /* preset as static upper bound */
}
if ( lwork == -1 ) {
return (GluIntArray(n) * iword +
superlu_cTempSpace(n, panel_size, nprocs)
+ (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword);
} else {
pcgstrf_SetupSpace(work, lwork);
}
/* Integer pointers for L\U factors */
if ( whichspace == SYSTEM ) {
xsup = intMalloc(n+1);
xsup_end = intMalloc(n);
supno = intMalloc(n+1);
xlsub = intMalloc(n+1);
xlsub_end = intMalloc(n);
xlusup = intMalloc(n+1);
xlusup_end = intMalloc(n);
xusub = intMalloc(n+1);
xusub_end = intMalloc(n);
} else {
xsup = (int_t *)cuser_malloc((n+1) * iword, HEAD);
xsup_end = (int_t *)cuser_malloc((n) * iword, HEAD);
supno = (int_t *)cuser_malloc((n+1) * iword, HEAD);
xlsub = (int_t *)cuser_malloc((n+1) * iword, HEAD);
xlsub_end = (int_t *)cuser_malloc((n) * iword, HEAD);
xlusup = (int_t *)cuser_malloc((n+1) * iword, HEAD);
xlusup_end = (int_t *)cuser_malloc((n) * iword, HEAD);
xusub = (int_t *)cuser_malloc((n+1) * iword, HEAD);
xusub_end = (int_t *)cuser_malloc((n) * iword, HEAD);
}
lusup = (complex *) pcgstrf_expand( &nzlumax, LUSUP, 0, 0, Glu );
ucol = (complex *) pcgstrf_expand( &nzumax, UCOL, 0, 0, Glu );
lsub = (int_t *) pcgstrf_expand( &nzlmax, LSUB, 0, 0, Glu );
usub = (int_t *) pcgstrf_expand( &nzumax, USUB, 0, 1, Glu );
while ( !ucol || !lsub || !usub ) {
/*SUPERLU_ABORT("Not enough core in LUMemInit()");*/
#if (PRNTlevel==1)
printf(".. pcgstrf_MemInit(): #retries " IFMT "\n", ++retries);
#endif
if ( whichspace == SYSTEM ) {
SUPERLU_FREE(ucol);
SUPERLU_FREE(lsub);
SUPERLU_FREE(usub);
} else {
cuser_free(nzumax*dword+(nzlmax+nzumax)*iword, HEAD);
}
nzumax /= 2; /* reduce request */
nzlmax /= 2;
if ( nzumax < annz/2 ) {
printf("Not enough memory to perform factorization.\n");
return (pcgstrf_memory_use(nzlmax, nzumax, nzlumax) + n);
}
ucol = (complex *) pcgstrf_expand( &nzumax, UCOL, 0, 0, Glu );
lsub = (int_t *) pcgstrf_expand( &nzlmax, LSUB, 0, 0, Glu );
usub = (int_t *) pcgstrf_expand( &nzumax, USUB, 0, 1, Glu );
}
if ( !lusup ) {
float t = pcgstrf_memory_use(nzlmax, nzumax, nzlumax) + n;
printf("Not enough memory to perform factorization .. "
"need %.1f GBytes\n", t*1e-9);
fflush(stdout);
return (t);
}
} else { /* refact == YES */
Lstore = L->Store;
Ustore = U->Store;
xsup = Lstore->sup_to_colbeg;
xsup_end = Lstore->sup_to_colend;
supno = Lstore->col_to_sup;
xlsub = Lstore->rowind_colbeg;
xlsub_end= Lstore->rowind_colend;
xlusup = Lstore->nzval_colbeg;
xlusup_end= Lstore->nzval_colend;
xusub = Ustore->colbeg;
xusub_end= Ustore->colend;
nzlmax = Glu->nzlmax; /* max from previous factorization */
nzumax = Glu->nzumax;
nzlumax = Glu->nzlumax;
if ( lwork == -1 ) {
return (GluIntArray(n) * iword + superlu_cTempSpace(n, panel_size, nprocs)
+ (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword);
} else if ( lwork == 0 ) {
whichspace = SYSTEM;
} else {
whichspace = USER;
stack.size = lwork;
stack.top2 = lwork;
}
lsub = cexpanders[LSUB].mem = Lstore->rowind;
lusup = cexpanders[LUSUP].mem = Lstore->nzval;
usub = cexpanders[USUB].mem = Ustore->rowind;
ucol = cexpanders[UCOL].mem = Ustore->nzval;
cexpanders[LSUB].size = nzlmax;
cexpanders[LUSUP].size = nzlumax;
cexpanders[USUB].size = nzumax;
cexpanders[UCOL].size = nzumax;
}
Glu->xsup = xsup;
Glu->xsup_end = xsup_end;
Glu->supno = supno;
Glu->lsub = lsub;
Glu->xlsub = xlsub;
Glu->xlsub_end = xlsub_end;
Glu->lusup = lusup;
Glu->xlusup = xlusup;
Glu->xlusup_end = xlusup_end;
Glu->ucol = ucol;
Glu->usub = usub;
Glu->xusub = xusub;
Glu->xusub_end = xusub_end;
Glu->nzlmax = nzlmax;
Glu->nzumax = nzumax;
Glu->nzlumax = nzlumax;
++no_expand;
#if ( PRNTlevel>=1 )
printf(".. pcgstrf_MemInit() refact %d, whichspace %d, nzlumax " IFMT ", nzumax " IFMT ", nzlmax " IFMT "\n",
refact, whichspace, nzlumax, nzumax, nzlmax);
printf(".. pcgstrf_MemInit() FILL_LUSUP " IFMT ", FILL_UCOL " IFMT ", FILL_LSUB " IFMT "\n",
FILL_LUSUP, FILL_UCOL, FILL_LSUB);
fflush(stdout);
#endif
return 0;
} /* pcgstrf_MemInit */
/*
* Allocate known working storage. Returns 0 if success, otherwise
* returns the number of bytes allocated so far when failure occurred.
*/
int_t
pcgstrf_WorkInit(int_t n, int_t panel_size, int_t **iworkptr, complex **dworkptr)
{
int_t isize, dsize, extra;
complex *old_ptr;
int_t maxsuper = sp_ienv(3),
rowblk = sp_ienv(4);
isize = (2*panel_size + 5 + NO_MARKER) * n * sizeof(int_t);
dsize = (n * panel_size +
NUM_TEMPV(n,panel_size,maxsuper,rowblk)) * sizeof(complex);
if ( whichspace == SYSTEM )
*iworkptr = (int_t *) intCalloc(isize/sizeof(int_t));
else
*iworkptr = (int_t *) cuser_malloc(isize, TAIL);
if ( ! *iworkptr ) {
fprintf(stderr, "pcgstrf_WorkInit: malloc fails for local iworkptr[]\n");
return (isize + n);
}
if ( whichspace == SYSTEM )
*dworkptr = (complex *) SUPERLU_MALLOC((size_t) dsize);
else {
*dworkptr = (complex *) cuser_malloc(dsize, TAIL);
if ( NotDoubleAlign(*dworkptr) ) {
old_ptr = *dworkptr;
*dworkptr = (complex*) DoubleAlign(*dworkptr);
*dworkptr = (complex*) ((double*)*dworkptr - 1);
extra = (char*)old_ptr - (char*)*dworkptr;
#if ( DEBUGlevel>=1 )
printf("pcgstrf_WorkInit: not aligned, extra" IFMT "\n", extra);
#endif
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
stack.top2 -= extra;
stack.used += extra;
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
} /* else */
if ( ! *dworkptr ) {
printf("malloc fails for local dworkptr[] ... dsize " IFMT "\n", dsize);
return (isize + dsize + n);
}
return 0;
}
/*
* Set up pointers for real working arrays.
*/
void
pcgstrf_SetRWork(int_t n, int_t panel_size, complex *dworkptr,
complex **dense, complex **tempv)
{
complex zero = {0.0, 0.0};
int_t maxsuper = sp_ienv(3);
int_t rowblk = sp_ienv(4);
*dense = dworkptr;
*tempv = *dense + panel_size*n;
cfill (*dense, n * panel_size, zero);
cfill (*tempv, NUM_TEMPV(n,panel_size,maxsuper,rowblk), zero);
}
/*
* Free the working storage used by factor routines.
*/
void pcgstrf_WorkFree(int_t *iwork, complex *dwork, GlobalLU_t *Glu)
{
if ( whichspace == SYSTEM ) {
SUPERLU_FREE (iwork);
SUPERLU_FREE (dwork);
} else {
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
stack.used -= (stack.size - stack.top2);
stack.top2 = stack.size;
/* pcgstrf_StackCompress(Glu); */
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
}
/*
* Expand the data structures for L and U during the factorization.
* Return value: 0 - successful return
* > 0 - number of bytes allocated when run out of space
*
* @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
* !! Warning: Not Implemented in SuperLU_MT !!
* @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
*/
int_t
pcgstrf_MemXpand(
int_t jcol,
int_t next, /* number of elements currently in the factors */
MemType mem_type,/* which type of memory to expand */
int_t *maxlen, /* modified - max. length of a data structure */
GlobalLU_t *Glu /* modified - global LU data structures */
)
{
void *new_mem;
#ifdef CHK_EXPAND
printf("pcgstrf_MemXpand(): jcol " IFMT ", next " IFMT ", maxlen " IFMT ", MemType " IFMT "\n",
jcol, next, *maxlen, mem_type);
#endif
if (mem_type == USUB)
new_mem = pcgstrf_expand(maxlen, mem_type, next, 1, Glu);
else
new_mem = pcgstrf_expand(maxlen, mem_type, next, 0, Glu);
if ( !new_mem ) {
int_t nzlmax = Glu->nzlmax;
int_t nzumax = Glu->nzumax;
int_t nzlumax = Glu->nzlumax;
fprintf(stderr, "Can't expand MemType %d : jcol " IFMT "\n",
mem_type, jcol);
return (pcgstrf_memory_use(nzlmax, nzumax, nzlumax) + ndim);
}
switch ( mem_type ) {
case LUSUP:
Glu->lusup = (complex *) new_mem;
Glu->nzlumax = *maxlen;
break;
case UCOL:
Glu->ucol = (complex *) new_mem;
Glu->nzumax = *maxlen;
break;
case LSUB:
Glu->lsub = (int_t *) new_mem;
Glu->nzlmax = *maxlen;
break;
case USUB:
Glu->usub = (int_t *) new_mem;
Glu->nzumax = *maxlen;
break;
}
return 0;
}
void
copy_mem_complex(int_t howmany, void *old, void *new)
{
register int_t i;
complex *dold = old;
complex *dnew = new;
for (i = 0; i < howmany; i++) dnew[i] = dold[i];
}
/*
* Expand the existing storage to accommodate more fill-ins.
*/
void
*pcgstrf_expand(
int_t *prev_len, /* length used from previous call */
MemType type, /* which part of the memory to expand */
int_t len_to_copy, /* size of memory to be copied to new store */
int_t keep_prev, /* = 1: use prev_len;
= 0: compute new_len to expand */
GlobalLU_t *Glu /* modified - global LU data structures */
)
{
double alpha = EXPAND;
void *new_mem, *old_mem;
int_t new_len, tries, lword, extra, bytes_to_copy;
void *ret = NULL;
if ( no_expand == 0 || keep_prev ) /* first time, or keep the requested length */
new_len = *prev_len;
else {
new_len = alpha * *prev_len;
}
if ( type == LSUB || type == USUB ) lword = sizeof(int_t);
else lword = sizeof(complex);
if ( whichspace == SYSTEM ) {
new_mem = (void *) SUPERLU_MALLOC( (size_t) new_len * lword );
if ( no_expand != 0 ) {
tries = 0;
if ( keep_prev ) {
if ( !new_mem ) return (NULL);
} else {
while ( !new_mem ) {
if ( ++tries > 10 ) return (NULL);
alpha = Reduce(alpha);
new_len = alpha * *prev_len;
new_mem = (void *) SUPERLU_MALLOC((size_t) new_len * lword);
}
}
if ( type == LSUB || type == USUB ) {
copy_mem_int(len_to_copy, cexpanders[type].mem, new_mem);
} else {
copy_mem_complex(len_to_copy, cexpanders[type].mem, new_mem);
}
SUPERLU_FREE (cexpanders[type].mem);
}
cexpanders[type].mem = (void *) new_mem;
} else { /* whichspace == USER */
if ( no_expand == 0 ) {
new_mem = cuser_malloc(new_len * lword, HEAD);
if ( NotDoubleAlign(new_mem) &&
(type == LUSUP || type == UCOL) ) {
old_mem = new_mem;
new_mem = (void *)DoubleAlign(new_mem);
extra = (char*)new_mem - (char*)old_mem;
#ifdef CHK_EXPAND
printf("expand(): not aligned, extra " IFMT "\n", extra);
#endif
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
stack.top1 += extra;
stack.used += extra;
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
cexpanders[type].mem = (void *) new_mem;
} else {
tries = 0;
extra = (new_len - *prev_len) * lword;
if ( keep_prev ) {
if ( StackFull(extra) ) {
new_len = 0;
cexpanders[type].mem = NULL;
return NULL;
}
} else {
while ( StackFull(extra) ) {
if ( ++tries > 10 ) {
new_len = 0;
cexpanders[type].mem = NULL;
return NULL;
}
alpha = Reduce(alpha);
new_len = alpha * *prev_len;
extra = (new_len - *prev_len) * lword;
}
}
if ( type != USUB ) {
new_mem = (void*)((char*)cexpanders[type + 1].mem + extra);
bytes_to_copy = (char*)stack.array + stack.top1
- (char*)cexpanders[type + 1].mem;
user_bcopy(cexpanders[type+1].mem, new_mem, bytes_to_copy);
if ( type < USUB ) {
Glu->usub = cexpanders[USUB].mem =
(void*)((char*)cexpanders[USUB].mem + extra);
}
if ( type < LSUB ) {
Glu->lsub = cexpanders[LSUB].mem =
(void*)((char*)cexpanders[LSUB].mem + extra);
}
if ( type < UCOL ) {
Glu->ucol = cexpanders[UCOL].mem =
(void*)((char*)cexpanders[UCOL].mem + extra);
}
stack.top1 += extra;
stack.used += extra;
if ( type == UCOL ) {
stack.top1 += extra; /* Add same amount for USUB */
stack.used += extra;
}
} /* if ... */
} /* else ... */
} /* else, whichspace == USER */
#ifdef DEBUG
printf("pcgstrf_expand[type " IFMT "]\n", type);
#endif
cexpanders[type].size = new_len;
*prev_len = new_len;
if ( no_expand ) ++no_expand;
return (void *) cexpanders[type].mem;
} /* expand */
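/*
 * Growth-policy sketch for pcgstrf_expand() (illustrative numbers, not
 * from the source): with alpha = EXPAND = 1.5 and *prev_len = 1000, the
 * first attempt requests 1500 entries. Each failed allocation (or full
 * user stack) shrinks alpha via Reduce() and retries, up to 10 retries:
 *
 *   try 1:  new_len = 1.5 * 1000 = 1500
 *   try 2:  new_len = Reduce(1.5) * 1000   (a smaller request)
 *   ...
 *   after 10 failed retries: return NULL
 */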
/*
* Compress the work[] array to remove fragmentation.
*/
void
pcgstrf_StackCompress(GlobalLU_t *Glu)
{
register int_t iword, dword;
char *last, *fragment;
int_t *ifrom, *ito;
complex *dfrom, *dto;
int_t *xlsub, *lsub, *xusub_end, *usub, *xlusup;
complex *ucol, *lusup;
iword = sizeof(int_t);
dword = sizeof(complex);
xlsub = Glu->xlsub;
lsub = Glu->lsub;
xusub_end = Glu->xusub_end;
usub = Glu->usub;
xlusup = Glu->xlusup;
ucol = Glu->ucol;
lusup = Glu->lusup;
dfrom = ucol;
dto = (complex *)((char*)lusup + xlusup[ndim] * dword);
copy_mem_complex(xusub_end[ndim-1], dfrom, dto);
ucol = dto;
ifrom = lsub;
ito = (int_t *) ((char*)ucol + xusub_end[ndim-1] * iword);
copy_mem_int(xlsub[ndim], ifrom, ito);
lsub = ito;
ifrom = usub;
ito = (int_t *) ((char*)lsub + xlsub[ndim] * iword);
copy_mem_int(xusub_end[ndim-1], ifrom, ito);
usub = ito;
last = (char*)usub + xusub_end[ndim-1] * iword;
/* reclaim the unused bytes between the packed end of usub and the old stack top */
fragment = (char*) ((char*)stack.array + stack.top1 - last);
stack.used -= (long long int) fragment;
stack.top1 -= (long long int) fragment;
Glu->ucol = ucol;
Glu->lsub = lsub;
Glu->usub = usub;
#ifdef CHK_EXPAND
printf("pcgstrf_StackCompress: fragment " IFMT "\n", fragment);
/* PrintStack("After compress", Glu);
for (last = 0; last < ndim; ++last)
print_lu_col("After compress:", last, 0);*/
#endif
}
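/*
 * Layout sketch (inferred from the copies above, not an original comment):
 * the user stack grows from stack.array up to stack.top1, and repeated
 * expansions leave unused slack inside it. Compression packs ucol, lsub
 * and usub back-to-back right after the end of lusup, in that order:
 *
 *   before: [ lusup | ucol ... | lsub ... | usub ... ] <- top1 (with slack)
 *   after:  [ lusup | ucol | lsub | usub ]             <- top1
 *
 * stack.top1 and stack.used both shrink by the reclaimed fragment.
 */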
/*
* Allocate storage for original matrix A
*/
void
callocateA(int_t n, int_t nnz, complex **a, int_t **asub, int_t **xa)
{
*a = (complex *) complexMalloc(nnz);
*asub = (int_t *) intMalloc(nnz);
*xa = (int_t *) intMalloc(n+1);
}
complex *complexMalloc(int_t n)
{
complex *buf;
buf = (complex *) SUPERLU_MALLOC( (size_t) n * sizeof(complex) );
if ( !buf ) {
fprintf(stderr, "SUPERLU_MALLOC failed for buf in complexMalloc()");
exit (1);
}
return (buf);
}
complex *complexCalloc(int_t n)
{
complex *buf;
register int_t i;
complex zero = {0.0, 0.0};
buf = (complex *) SUPERLU_MALLOC( (size_t) n * sizeof(complex) );
if ( !buf ) {
fprintf(stderr, "SUPERLU_MALLOC failed for buf in complexCalloc()");
exit (1);
}
for (i = 0; i < n; ++i) buf[i] = zero;
return (buf);
}
/*
* Set up memory image in lusup[*], using the supernode boundaries in
* the Householder matrix.
*
* In both static and dynamic scheme, the relaxed supernodes (leaves)
* are stored in the beginning of lusup[*]. In the static scheme, the
* memory is also set aside for the internal supernodes using upper
* bound information from H. In the dynamic scheme, however, the memory
* for the internal supernodes is not allocated by this routine.
*
* Return value
* o Static scheme: number of nonzeros of all the supernodes in H.
* o Dynamic scheme: number of nonzeros of the relaxed supernodes.
*/
int_t
cPresetMap(
const int_t n,
SuperMatrix *A, /* original matrix permuted by columns */
pxgstrf_relax_t *pxgstrf_relax, /* relaxed supernodes */
superlumt_options_t *superlumt_options, /* input */
GlobalLU_t *Glu /* modified */
)
{
register int_t i, j, k, w, rs, rs_lastcol, krow, kmark, maxsup, nextpos;
register int_t rs_nrow; /* number of nonzero rows in a relaxed supernode */
int_t *marker, *asub, *xa_begin, *xa_end;
NCPformat *Astore;
int_t *map_in_sup; /* memory mapping function; values irrelevant on entry. */
int_t *colcnt; /* column count of Lc or H */
int_t *super_bnd; /* supernode partition in H */
char *snode_env, *getenv();
snode_env = getenv("SuperLU_DYNAMIC_SNODE_STORE");
if ( snode_env != NULL ) {
Glu->dynamic_snode_bound = YES;
#if ( PRNTlevel>=1 )
printf(".. Use dynamic alg. to allocate storage for L supernodes.\n");
#endif
} else Glu->dynamic_snode_bound = NO;
Astore = A->Store;
asub = Astore->rowind;
xa_begin = Astore->colbeg;
xa_end = Astore->colend;
rs = 1;
marker = intMalloc(n);
ifill(marker, n, EMPTY);
map_in_sup = Glu->map_in_sup = intCalloc(n+1);
colcnt = superlumt_options->colcnt_h;
super_bnd = superlumt_options->part_super_h;
nextpos = 0;
/* Split large supernode into smaller pieces */
maxsup = sp_ienv(3);
for (j = 0; j < n; ) {
w = super_bnd[j];
k = j + w;
if ( w > maxsup ) {
w = w % maxsup;
if ( w == 0 ) w = maxsup;
while ( j < k ) {
super_bnd[j] = w;
j += w;
w = maxsup;
}
}
j = k;
}
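/*
 * Worked example of the splitting loop above (illustrative numbers): a
 * supernode of width w = 13 starting at column j, with maxsup = 5, first
 * takes the remainder 13 % 5 = 3 and then full pieces of maxsup, so
 * super_bnd[] records pieces of widths 3, 5, 5 covering columns
 * [j, j+3), [j+3, j+8) and [j+8, j+13).
 */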
for (j = 0; j < n; j += w) {
if ( Glu->dynamic_snode_bound == NO ) map_in_sup[j] = nextpos;
if ( pxgstrf_relax[rs].fcol == j ) {
/* Column j starts a relaxed supernode. */
map_in_sup[j] = nextpos;
rs_nrow = 0;
w = pxgstrf_relax[rs++].size;
rs_lastcol = j + w;
for (i = j; i < rs_lastcol; ++i) {
/* for each nonzero in A[*,i] */
for (k = xa_begin[i]; k < xa_end[i]; k++) {
krow = asub[k];
kmark = marker[krow];
if ( kmark != j ) { /* first time visit krow */
marker[krow] = j;
++rs_nrow;
}
}
}
nextpos += w * rs_nrow;
/* Find the next H-supernode, with leading column i, which is
outside the relaxed supernode, rs. */
for (i = j; i < rs_lastcol; k = i, i += super_bnd[i]);
if ( i > rs_lastcol ) {
/* The w columns [rs_lastcol, i) may join in the
preceding relaxed supernode; make sure we leave
enough room for the combined supernode. */
w = i - rs_lastcol;
nextpos += w * SUPERLU_MAX( rs_nrow, colcnt[k] );
}
w = i - j;
} else { /* Column j starts a supernode in H */
w = super_bnd[j];
if ( Glu->dynamic_snode_bound == NO ) nextpos += w * colcnt[j];
}
/* Set up the offset (negative) to the leading column j of a
supernode in H */
for (i = 1; i < w; ++i) map_in_sup[j + i] = -i;
} /* for j ... */
if ( Glu->dynamic_snode_bound == YES ) Glu->nextlu = nextpos;
else map_in_sup[n] = nextpos;
#if ( PRNTlevel>=1 )
printf("** PresetMap() allocates " IFMT " reals to lusup[*]....\n", nextpos);
#endif
free (marker);
return nextpos;
}
|
ocp_nlp_common.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
// blasfeo
#include "blasfeo/include/blasfeo_common.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// hpipm
#include "hpipm/include/hpipm_d_ocp_qp_dim.h"
// acados
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
// openmp
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
/************************************************
* config
************************************************/
acados_size_t ocp_nlp_config_calculate_size(int N)
{
acados_size_t size = 0;
// self
size += sizeof(ocp_nlp_config);
// qp solver
size += 1 * ocp_qp_xcond_solver_config_calculate_size();
// regularization
size += ocp_nlp_reg_config_calculate_size();
// dynamics
size += N * sizeof(ocp_nlp_dynamics_config *);
for (int i = 0; i < N; i++) size += ocp_nlp_dynamics_config_calculate_size();
// cost
size += (N + 1) * sizeof(ocp_nlp_cost_config *);
for (int i = 0; i <= N; i++) size += ocp_nlp_cost_config_calculate_size();
// constraints
size += (N + 1) * sizeof(ocp_nlp_constraints_config *);
for (int i = 0; i <= N; i++) size += ocp_nlp_constraints_config_calculate_size();
return size;
}
ocp_nlp_config *ocp_nlp_config_assign(int N, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
ocp_nlp_config *config = (ocp_nlp_config *) c_ptr;
c_ptr += sizeof(ocp_nlp_config);
config->N = N;
// qp solver
config->qp_solver = ocp_qp_xcond_solver_config_assign(c_ptr);
c_ptr += ocp_qp_xcond_solver_config_calculate_size();
// regularization
config->regularize = ocp_nlp_reg_config_assign(c_ptr);
c_ptr += ocp_nlp_reg_config_calculate_size();
// dynamics
config->dynamics = (ocp_nlp_dynamics_config **) c_ptr;
c_ptr += N * sizeof(ocp_nlp_dynamics_config *);
for (int i = 0; i < N; i++)
{
config->dynamics[i] = ocp_nlp_dynamics_config_assign(c_ptr);
c_ptr += ocp_nlp_dynamics_config_calculate_size();
}
// cost
config->cost = (ocp_nlp_cost_config **) c_ptr;
c_ptr += (N + 1) * sizeof(ocp_nlp_cost_config *);
for (int i = 0; i <= N; i++)
{
config->cost[i] = ocp_nlp_cost_config_assign(c_ptr);
c_ptr += ocp_nlp_cost_config_calculate_size();
}
// constraints
config->constraints = (ocp_nlp_constraints_config **) c_ptr;
c_ptr += (N + 1) * sizeof(ocp_nlp_constraints_config *);
for (int i = 0; i <= N; i++)
{
config->constraints[i] = ocp_nlp_constraints_config_assign(c_ptr);
c_ptr += ocp_nlp_constraints_config_calculate_size();
}
return config;
}
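// Hedged usage sketch (not part of this file; N is assumed in scope): objects
// in this module follow a two-phase allocation pattern -- query the exact byte
// count, allocate one contiguous block, then carve it up with the matching
// assign().
#if 0
acados_size_t bytes = ocp_nlp_config_calculate_size(N);
void *ptr = malloc(bytes); // single allocation
ocp_nlp_config *config = ocp_nlp_config_assign(N, ptr); // no further mallocs
// ... use config ...
free(ptr); // releases the config and all substructures at once
#endif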
/************************************************
* dims
************************************************/
static acados_size_t ocp_nlp_dims_calculate_size_self(int N)
{
acados_size_t size = 0;
size += sizeof(ocp_nlp_dims);
// nlp sizes
size += 6 * (N + 1) * sizeof(int); // nv, nx, nu, ni, nz, ns
// dynamics
size += N * sizeof(void *);
// cost
size += (N + 1) * sizeof(void *);
// constraints
size += (N + 1) * sizeof(void *);
// regularization
size += ocp_nlp_reg_dims_calculate_size(N);
size += sizeof(ocp_nlp_reg_dims);
size += 8; // initial align
size += 8; // intermediate align
make_int_multiple_of(8, &size);
return size;
}
acados_size_t ocp_nlp_dims_calculate_size(void *config_)
{
ocp_nlp_config *config = config_;
int N = config->N;
acados_size_t size = 0;
// self
size += ocp_nlp_dims_calculate_size_self(N);
// dynamics
for (int i = 0; i < N; i++)
size += config->dynamics[i]->dims_calculate_size(config->dynamics[i]);
// cost
for (int i = 0; i <= N; i++) size += config->cost[i]->dims_calculate_size(config->cost[i]);
// constraints
for (int i = 0; i <= N; i++)
size += config->constraints[i]->dims_calculate_size(config->constraints[i]);
// qp solver
size += config->qp_solver->dims_calculate_size(config->qp_solver, N);
return size;
}
static ocp_nlp_dims *ocp_nlp_dims_assign_self(int N, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_dims *dims = (ocp_nlp_dims *) c_ptr;
c_ptr += sizeof(ocp_nlp_dims);
// dynamics
dims->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
// cost
dims->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// constraints
dims->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// nv
assign_and_advance_int(N + 1, &dims->nv, &c_ptr);
// nx
assign_and_advance_int(N + 1, &dims->nx, &c_ptr);
// nu
assign_and_advance_int(N + 1, &dims->nu, &c_ptr);
// ni
assign_and_advance_int(N + 1, &dims->ni, &c_ptr);
// nz
assign_and_advance_int(N + 1, &dims->nz, &c_ptr);
// ns
assign_and_advance_int(N + 1, &dims->ns, &c_ptr);
// intermediate align
align_char_to(8, &c_ptr);
// regularization
dims->regularize = ocp_nlp_reg_dims_assign(N, c_ptr);
c_ptr += ocp_nlp_reg_dims_calculate_size(N);
/* initialize qp_solver dimensions */
// dims->qp_solver->N = N;
// for (int i = 0; i <= N; i++)
// {
// TODO(dimitris): values below are needed for reformulation of QP when soft constraints
// are not supported. Make this a bit more transparent as it clashes with nbx/nbu above.
// dims->qp_solver->nsbx[i] = 0;
// dims->qp_solver->nsbu[i] = 0;
// dims->qp_solver->nsg[i] = 0;
// }
// N
dims->N = N;
// initialize dimensions to zero by default
// nv
for(int i=0; i<=N; i++)
dims->nv[i] = 0;
// nx
for(int i=0; i<=N; i++)
dims->nx[i] = 0;
// nu
for(int i=0; i<=N; i++)
dims->nu[i] = 0;
// ni
for(int i=0; i<=N; i++)
dims->ni[i] = 0;
// nz
for(int i=0; i<=N; i++)
dims->nz[i] = 0;
// ns
for(int i=0; i<=N; i++)
dims->ns[i] = 0;
// TODO: initialize dims to zero by default in the modules as well
// assert
assert((char *) raw_memory + ocp_nlp_dims_calculate_size_self(N) >= c_ptr);
return dims;
}
ocp_nlp_dims *ocp_nlp_dims_assign(void *config_, void *raw_memory)
{
ocp_nlp_config *config = config_;
int N = config->N;
char *c_ptr = (char *) raw_memory;
// self
ocp_nlp_dims *dims = ocp_nlp_dims_assign_self(N, c_ptr);
c_ptr += ocp_nlp_dims_calculate_size_self(N);
// dynamics
for (int i = 0; i < N; i++)
{
dims->dynamics[i] = config->dynamics[i]->dims_assign(config->dynamics[i], c_ptr);
c_ptr += config->dynamics[i]->dims_calculate_size(config->dynamics[i]);
}
// cost
for (int i = 0; i <= N; i++)
{
dims->cost[i] = config->cost[i]->dims_assign(config->cost[i], c_ptr);
c_ptr += config->cost[i]->dims_calculate_size(config->cost[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
dims->constraints[i] =
config->constraints[i]->dims_assign(config->constraints[i], c_ptr);
c_ptr += config->constraints[i]->dims_calculate_size(config->constraints[i]);
}
// qp solver
dims->qp_solver = config->qp_solver->dims_assign(config->qp_solver, N, c_ptr);
c_ptr += config->qp_solver->dims_calculate_size(config->qp_solver, N);
// assert
assert((char *) raw_memory + ocp_nlp_dims_calculate_size(config_) >= c_ptr);
return dims;
}
void ocp_nlp_dims_set_opt_vars(void *config_, void *dims_, const char *field,
const void* value_array)
{
// to set dimension nx, nu, nz, ns (number of slacks = number of soft constraints)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int N = config->N;
int *int_array = (int *) value_array;
/* set ocp_nlp dimension */
if (!strcmp(field, "nx"))
{
// opt var
for (int i = 0; i <= N; i++)
{
// set nx
dims->nx[i] = int_array[i];
// update nv
dims->nv[i] = dims->nu[i] + dims->nx[i] + 2 * dims->ns[i];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nx", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx", &int_array[i]);
}
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx1", &int_array[i+1]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nx", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nx", &int_array[i]);
}
// regularization
for (int i = 0; i <= N; i++)
{
config->regularize->dims_set(config->regularize, dims->regularize, i, "nx", &int_array[i]);
}
}
else if (!strcmp(field, "nu"))
{
// nlp opt var
for (int i = 0; i <= N; i++)
{
// set nu
dims->nu[i] = int_array[i];
// update nv
dims->nv[i] = dims->nu[i] + dims->nx[i] + 2 * dims->ns[i];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nu", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu", &int_array[i]);
}
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu1", &int_array[i+1]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nu", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nu", &int_array[i]);
}
// regularization
for (int i = 0; i <= N; i++)
{
config->regularize->dims_set(config->regularize, dims->regularize, i, "nu", &int_array[i]);
}
}
else if (!strcmp(field, "nz"))
{
// nlp opt var
for (int i = 0; i <= N; i++)
{
// set nz
dims->nz[i] = int_array[i];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nz", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nz", &int_array[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nz", &int_array[i]);
}
}
else if (!strcmp(field, "ns"))
{
// nlp opt var
for (int i = 0; i <= N; i++)
{
// set ns
dims->ns[i] = int_array[i];
// update nv
dims->nv[i] = dims->nu[i] + dims->nx[i] + 2 * dims->ns[i];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "ns", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ns",
&int_array[i]);
}
}
else
{
printf("error: dims type not available in module ocp_nlp: %s", field);
exit(1);
}
#if 0
/* set ocp_nlp submodule dimensions */
if (strcmp(field, "ns")) // dynamics do not contain slack/soft constraints
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], field, &int_array[i]);
}
}
if (!strcmp(field, "nu"))
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu1", &int_array[i+1]);
}
}
if (!strcmp(field, "nx"))
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx1", &int_array[i+1]);
}
}
for (int i = 0; i <= N; i++) // cost
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], field, &int_array[i]);
}
for (int i = 0; i <= N; i++) // constraints
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
field, &int_array[i]);
}
if (strcmp(field, "nz")) // qp_solver does not contain nz
{
for (int i = 0; i <= N; i++) // qp_solver
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field,
&int_array[i]);
}
}
#endif
}
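// Hedged usage sketch (illustrative values; config and dims assumed in scope):
// passing an array of N+1 entries for "nx" sets dims->nx, recomputes
// nv = nu + nx + 2*ns, and fans the value out to the cost, dynamics,
// constraints, qp solver and regularization dims.
#if 0
int N = config->N;
int nx[N + 1];
for (int i = 0; i <= N; i++) nx[i] = 4; // 4 states at every stage
ocp_nlp_dims_set_opt_vars(config, dims, "nx", nx);
#endif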
void ocp_nlp_dims_set_constraints(void *config_, void *dims_, int stage, const char *field,
const void* value_)
{
// to set dimension nbx, nbu, ng, nh, nq (quadratic over nonlinear)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value_;
int i = stage;
// set in constraint module
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
field, int_value);
// update ni in ocp_nlp dimensions
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"ni", &dims->ni[i]);
// update qp_solver dims
if ( (!strcmp(field, "nbx")) || (!strcmp(field, "nbu")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
// regularization
config->regularize->dims_set(config->regularize, dims->regularize, i, (char *) field, int_value);
}
else if ( (!strcmp(field, "nsbx")) || (!strcmp(field, "nsbu")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
}
else if ( (!strcmp(field, "ng")) || (!strcmp(field, "nh")) || (!strcmp(field, "nphi")))
{
// update ng_qp_solver in qp_solver
int ng_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"ng_qp_solver", &ng_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ng", &ng_qp_solver);
// regularization
config->regularize->dims_set(config->regularize, dims->regularize, i, "ng", &ng_qp_solver);
}
else if ( (!strcmp(field, "nsg")) || (!strcmp(field, "nsh")) || (!strcmp(field, "nsphi")))
{
// update ng_qp_solver in qp_solver
int nsg_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "nsg_qp_solver", &nsg_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nsg", &nsg_qp_solver);
}
else if ( (!strcmp(field, "nbxe")) || (!strcmp(field, "nbue")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
}
else if ( (!strcmp(field, "nge")) || (!strcmp(field, "nhe")) || (!strcmp(field, "nphie")))
{
// update ng_qp_solver in qp_solver
int ng_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"nge_qp_solver", &ng_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nge", &ng_qp_solver);
}
}
void ocp_nlp_dims_set_cost(void *config_, void *dims_, int stage,
const char *field, const void* value_)
{
// to set dimension ny (output)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value_;
config->cost[stage]->dims_set(config->cost[stage], dims->cost[stage], field, int_value);
}
void ocp_nlp_dims_set_dynamics(void *config_, void *dims_, int stage,
const char *field, const void* value)
{
// mainly for gnsf dimensions
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value;
config->dynamics[stage]->dims_set(config->dynamics[stage], dims->dynamics[stage], field, int_value);
}
/************************************************
* in
************************************************/
acados_size_t ocp_nlp_in_calculate_size_self(int N)
{
acados_size_t size = sizeof(ocp_nlp_in);
size += N * sizeof(double); // Ts
size += N * sizeof(void *); // dynamics
size += (N + 1) * sizeof(void *); // cost
size += (N + 1) * sizeof(void *); // constraints
size += 3*8; // aligns
return size;
}
acados_size_t ocp_nlp_in_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
int N = dims->N;
acados_size_t size = ocp_nlp_in_calculate_size_self(N);
// dynamics
for (int i = 0; i < N; i++)
{
size +=
config->dynamics[i]->model_calculate_size(config->dynamics[i], dims->dynamics[i]);
}
// cost
for (int i = 0; i <= N; i++)
{
size += config->cost[i]->model_calculate_size(config->cost[i], dims->cost[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
size += config->constraints[i]->model_calculate_size(config->constraints[i],
dims->constraints[i]);
}
// make_int_multiple_of(64, &size);
return size;
}
ocp_nlp_in *ocp_nlp_in_assign_self(int N, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_in *in = (ocp_nlp_in *) c_ptr;
c_ptr += sizeof(ocp_nlp_in);
// align
align_char_to(8, &c_ptr);
// Ts
assign_and_advance_double(N, &in->Ts, &c_ptr);
// dynamics
in->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
// cost
in->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// constraints
in->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
align_char_to(8, &c_ptr);
assert((char *) raw_memory + ocp_nlp_in_calculate_size_self(N) >= c_ptr);
return in;
}
ocp_nlp_in *ocp_nlp_in_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
int N = dims->N;
char *c_ptr = (char *) raw_memory;
// struct
ocp_nlp_in *in = ocp_nlp_in_assign_self(N, c_ptr);
c_ptr += ocp_nlp_in_calculate_size_self(N);
// dynamics
for (int i = 0; i < N; i++)
{
in->dynamics[i] =
config->dynamics[i]->model_assign(config->dynamics[i], dims->dynamics[i], c_ptr);
c_ptr +=
config->dynamics[i]->model_calculate_size(config->dynamics[i], dims->dynamics[i]);
}
// cost
for (int i = 0; i <= N; i++)
{
in->cost[i] = config->cost[i]->model_assign(config->cost[i], dims->cost[i], c_ptr);
c_ptr += config->cost[i]->model_calculate_size(config->cost[i], dims->cost[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
in->constraints[i] = config->constraints[i]->model_assign(config->constraints[i],
dims->constraints[i], c_ptr);
c_ptr += config->constraints[i]->model_calculate_size(config->constraints[i],
dims->constraints[i]);
}
assert((char *) raw_memory + ocp_nlp_in_calculate_size(config, dims) >= c_ptr);
return in;
}
/************************************************
* out
************************************************/
acados_size_t ocp_nlp_out_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
acados_size_t size = sizeof(ocp_nlp_out);
size += 4 * (N + 1) * sizeof(struct blasfeo_dvec); // ux, lam, t, z
size += 1 * N * sizeof(struct blasfeo_dvec); // pi
for (int i = 0; i < N; i++)
{
size += 1 * blasfeo_memsize_dvec(nv[i]); // ux
size += 1 * blasfeo_memsize_dvec(nz[i]); // z
size += 2 * blasfeo_memsize_dvec(2 * ni[i]); // lam, t
size += 1 * blasfeo_memsize_dvec(nx[i + 1]); // pi
}
size += 1 * blasfeo_memsize_dvec(nv[N]); // ux
size += 1 * blasfeo_memsize_dvec(nz[N]); // z
size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // lam, t
size += 8; // initial align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
make_int_multiple_of(8, &size);
return size;
}
ocp_nlp_out *ocp_nlp_out_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
ocp_nlp_out *out = (ocp_nlp_out *) c_ptr;
c_ptr += sizeof(ocp_nlp_out);
// blasfeo_struct align
align_char_to(8, &c_ptr);
// blasfeo_dvec_struct
// ux
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->ux, &c_ptr);
// z
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->z, &c_ptr);
// pi
assign_and_advance_blasfeo_dvec_structs(N, &out->pi, &c_ptr);
// lam
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->lam, &c_ptr);
// t
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->t, &c_ptr);
// blasfeo_mem align
align_char_to(64, &c_ptr);
// blasfeo_dvec
// ux
for (int i = 0; i <= N; ++i)
{
assign_and_advance_blasfeo_dvec_mem(nv[i], out->ux + i, &c_ptr);
}
// z
for (int i = 0; i <= N; ++i)
{
assign_and_advance_blasfeo_dvec_mem(nz[i], out->z + i, &c_ptr);
}
// pi
for (int i = 0; i < N; ++i)
{
assign_and_advance_blasfeo_dvec_mem(nx[i + 1], out->pi + i, &c_ptr);
}
// lam
for (int i = 0; i <= N; ++i)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[i], out->lam + i, &c_ptr);
}
// t
for (int i = 0; i <= N; ++i)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[i], out->t + i, &c_ptr);
}
// zero solution
for(int i=0; i<N; i++)
{
blasfeo_dvecse(nv[i], 0.0, out->ux+i, 0);
blasfeo_dvecse(nz[i], 0.0, out->z+i, 0);
blasfeo_dvecse(nx[i+1], 0.0, out->pi+i, 0);
blasfeo_dvecse(2*ni[i], 0.0, out->lam+i, 0);
blasfeo_dvecse(2*ni[i], 0.0, out->t+i, 0);
}
blasfeo_dvecse(nv[N], 0.0, out->ux+N, 0);
blasfeo_dvecse(nz[N], 0.0, out->z+N, 0);
blasfeo_dvecse(2*ni[N], 0.0, out->lam+N, 0);
blasfeo_dvecse(2*ni[N], 0.0, out->t+N, 0);
assert((char *) raw_memory + ocp_nlp_out_calculate_size(config, dims) >= c_ptr);
return out;
}
/************************************************
* options
************************************************/
acados_size_t ocp_nlp_opts_calculate_size(void *config_, void *dims_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
acados_size_t size = 0;
size += sizeof(ocp_nlp_opts);
size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
size += config->regularize->opts_calculate_size();
// dynamics
size += N * sizeof(void *);
for (int i = 0; i < N; i++)
{
size += dynamics[i]->opts_calculate_size(dynamics[i], dims->dynamics[i]);
}
// cost
size += (N + 1) * sizeof(void *);
for (int i = 0; i <= N; i++)
{
size += cost[i]->opts_calculate_size(cost[i], dims->cost[i]);
}
// constraints
size += (N + 1) * sizeof(void *);
for (int i = 0; i <= N; i++)
{
size += constraints[i]->opts_calculate_size(constraints[i], dims->constraints[i]);
}
size += 2*8; // 2 aligns
return size;
}
void *ocp_nlp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
char *c_ptr = (char *) raw_memory;
align_char_to(8, &c_ptr);
ocp_nlp_opts *opts = (ocp_nlp_opts *) c_ptr;
c_ptr += sizeof(ocp_nlp_opts);
/* pointers to substructures */
opts->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
opts->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
opts->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
align_char_to(8, &c_ptr);
/* substructures */
opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
opts->regularize = config->regularize->opts_assign(c_ptr);
c_ptr += config->regularize->opts_calculate_size();
// dynamics
for (int i = 0; i < N; i++)
{
opts->dynamics[i] = dynamics[i]->opts_assign(dynamics[i], dims->dynamics[i], c_ptr);
c_ptr += dynamics[i]->opts_calculate_size(dynamics[i], dims->dynamics[i]);
}
// cost
for (int i = 0; i <= N; i++)
{
opts->cost[i] = cost[i]->opts_assign(cost[i], dims->cost[i], c_ptr);
c_ptr += cost[i]->opts_calculate_size(cost[i], dims->cost[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
opts->constraints[i] =
constraints[i]->opts_assign(constraints[i], dims->constraints[i], c_ptr);
c_ptr += constraints[i]->opts_calculate_size(constraints[i], dims->constraints[i]);
}
assert((char *) raw_memory + ocp_nlp_opts_calculate_size(config, dims) >= c_ptr);
return opts;
}
void ocp_nlp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
ocp_nlp_reg_config *regularize = config->regularize;
int N = dims->N;
opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
#if defined(ACADOS_NUM_THREADS)
opts->num_threads = ACADOS_NUM_THREADS;
// printf("\nocp_nlp: openmp threads from macro = %d\n", opts->num_threads);
#else
opts->num_threads = omp_get_max_threads();
// printf("\nocp_nlp: omp_get_max_threads %d", omp_get_max_threads());
#endif
#endif
// printf("\nocp_nlp: openmp threads = %d\n", opts->num_threads);
opts->globalization = FIXED_STEP;
opts->print_level = 0;
opts->step_length = 1.0;
opts->levenberg_marquardt = 0.0;
/* submodules opts */
// qp solver
qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);
// dynamics
for (int i = 0; i < N; i++)
{
dynamics[i]->opts_initialize_default(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
}
// cost
for (int i = 0; i <= N; i++)
{
cost[i]->opts_initialize_default(cost[i], dims->cost[i], opts->cost[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
constraints[i]->opts_initialize_default(constraints[i], dims->constraints[i], opts->constraints[i]);
}
// globalization
opts->alpha_min = 0.05;
opts->alpha_reduction = 0.7;
opts->full_step_dual = 0;
opts->line_search_use_sufficient_descent = 0;
opts->globalization_use_SOC = 0;
opts->eps_sufficient_descent = 1e-4; // Leineweber1999: MUSCOD-I eps_T = 1e-4 (p.89); Note: eps_T = 0.1 originally proposed by Powell 1978 (Leineweber 1999, p. 53)
return;
}
void ocp_nlp_opts_update(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// dynamics
for (int i = 0; i < N; i++)
{
dynamics[i]->opts_update(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
}
// cost
for (int i = 0; i <= N; i++)
{
cost[i]->opts_update(cost[i], dims->cost[i], opts->cost[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
constraints[i]->opts_update(constraints[i], dims->constraints[i], opts->constraints[i]);
}
return;
}
void ocp_nlp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
ocp_nlp_config *config = config_;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name, i.e. substring in field before '_'
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
for (int i=0; i<module_length; i++)
module[i] = field[i];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to QP module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
{
config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts,
field+module_length+1, value);
}
else // nlp opts
{
if (!strcmp(field, "reuse_workspace"))
{
int* reuse_workspace = (int *) value;
opts->reuse_workspace = *reuse_workspace;
}
else if (!strcmp(field, "num_threads"))
{
int* num_threads = (int *) value;
opts->num_threads = *num_threads;
}
else if (!strcmp(field, "step_length"))
{
double* step_length = (double *) value;
opts->step_length = *step_length;
}
else if (!strcmp(field, "alpha_reduction"))
{
double* alpha_reduction = (double *) value;
opts->alpha_reduction = *alpha_reduction;
}
else if (!strcmp(field, "alpha_min"))
{
double* alpha_min = (double *) value;
opts->alpha_min = *alpha_min;
}
else if (!strcmp(field, "eps_sufficient_descent"))
{
double* eps_sufficient_descent = (double *) value;
opts->eps_sufficient_descent = *eps_sufficient_descent;
}
else if (!strcmp(field, "full_step_dual"))
{
int* full_step_dual = (int *) value;
opts->full_step_dual = *full_step_dual;
}
else if (!strcmp(field, "line_search_use_sufficient_descent"))
{
int* line_search_use_sufficient_descent = (int *) value;
opts->line_search_use_sufficient_descent = *line_search_use_sufficient_descent;
}
else if (!strcmp(field, "globalization_use_SOC"))
{
int* globalization_use_SOC = (int *) value;
opts->globalization_use_SOC = *globalization_use_SOC;
}
else if (!strcmp(field, "globalization"))
{
char* globalization = (char *) value;
if (!strcmp(globalization, "fixed_step"))
{
opts->globalization = FIXED_STEP;
}
else if (!strcmp(globalization, "merit_backtracking"))
{
opts->globalization = MERIT_BACKTRACKING;
}
else
{
printf("\nerror: ocp_nlp_opts_set: not supported value for globalization, got: %s\n",
globalization);
exit(1);
}
}
else if (!strcmp(field, "levenberg_marquardt"))
{
double* levenberg_marquardt = (double *) value;
opts->levenberg_marquardt = *levenberg_marquardt;
}
else if (!strcmp(field, "exact_hess"))
{
int N = config->N;
// cost
for (int i=0; i<=N; i++)
config->cost[i]->opts_set(config->cost[i], opts->cost[i], "exact_hess", value);
// dynamics
for (int i=0; i<N; i++)
config->dynamics[i]->opts_set(config->dynamics[i], opts->dynamics[i],
"compute_hess", value);
// constraints
for (int i=0; i<=N; i++)
config->constraints[i]->opts_set(config->constraints[i], opts->constraints[i],
"compute_hess", value);
}
// selectively turn on exact hessian contributions
else if (!strcmp(field, "exact_hess_cost"))
{
int N = config->N;
for (int i=0; i<=N; i++)
config->cost[i]->opts_set(config->cost[i], opts->cost[i], "exact_hess", value);
}
else if (!strcmp(field, "exact_hess_dyn"))
{
int N = config->N;
for (int i=0; i<N; i++)
config->dynamics[i]->opts_set(config->dynamics[i], opts->dynamics[i],
"compute_hess", value);
}
else if (!strcmp(field, "exact_hess_constr"))
{
int N = config->N;
for (int i=0; i<=N; i++)
config->constraints[i]->opts_set(config->constraints[i], opts->constraints[i],
"compute_hess", value);
}
else if (!strcmp(field, "print_level"))
{
int* print_level = (int *) value;
if (*print_level < 0)
{
printf("\nerror: ocp_nlp_opts_set: invalid value for print_level field, need int >=0, got %d.\n", *print_level);
exit(1);
}
opts->print_level = *print_level;
}
else
{
printf("\nerror: ocp_nlp_opts_set: wrong field: %s\n", field);
exit(1);
}
}
return;
}
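// Hedged usage sketch: a field with a known module prefix is forwarded with
// the prefix stripped; everything else is handled at the NLP level.
// "qp_warm_start" is an assumed example of a QP-solver option name.
#if 0
int print_level = 1;
ocp_nlp_opts_set(config, opts, "print_level", &print_level); // NLP-level option
int warm_start = 1;
ocp_nlp_opts_set(config, opts, "qp_warm_start", &warm_start); // -> qp module, field "warm_start"
#endif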
void ocp_nlp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
ocp_nlp_config *config = config_;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
for (int i=0; i<module_length; i++)
module[i] = field[i];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to dynamics module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "dynamics")) )
{
config->dynamics[stage]->opts_set( config->dynamics[stage], opts->dynamics[stage],
field+module_length+1, value );
}
// pass options to cost module
else if ( ptr_module!=NULL && (!strcmp(ptr_module, "cost")) )
{
config->cost[stage]->opts_set( config->cost[stage], opts->cost[stage],
field+module_length+1, value);
}
// pass options to constraint module
else if ( ptr_module!=NULL && (!strcmp(ptr_module, "constraints")) )
{
config->constraints[stage]->opts_set( config->constraints[stage], opts->constraints[stage],
(char *) field+module_length+1, value);
}
else
{
printf("\nerror: ocp_nlp_opts_set_at_stage: wrong field: %s\n", field);
exit(1);
}
return;
}
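// Hedged usage sketch: stage-wise options must carry a module prefix;
// "dynamics_num_steps" is an assumed example of an integrator option that
// would be forwarded to the dynamics module of stage 0 as "num_steps".
#if 0
int num_steps = 2;
ocp_nlp_opts_set_at_stage(config, opts, 0, "dynamics_num_steps", &num_steps);
#endif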
/************************************************
* memory
************************************************/
acados_size_t ocp_nlp_memory_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nz = dims->nz;
int *nu = dims->nu;
int *ni = dims->ni;
acados_size_t size = sizeof(ocp_nlp_memory);
// qp in
size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// qp out
size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// qp solver
size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);
// dynamics
size += N * sizeof(void *);
for (int i = 0; i < N; i++)
{
size += dynamics[i]->memory_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
}
// cost
size += (N + 1) * sizeof(void *);
for (int i = 0; i <= N; i++)
{
size += cost[i]->memory_calculate_size(cost[i], dims->cost[i], opts->cost[i]);
}
// constraints
size += (N + 1) * sizeof(void *);
for (int i = 0; i <= N; i++)
{
size += constraints[i]->memory_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]);
}
// nlp res
size += ocp_nlp_res_calculate_size(dims);
size += (N+1)*sizeof(bool); // set_sim_guess
size += (N+1)*sizeof(struct blasfeo_dmat); // dzduxt
size += 6*(N+1)*sizeof(struct blasfeo_dvec); // cost_grad ineq_fun ineq_adj dyn_adj sim_guess z_alg
size += 1*N*sizeof(struct blasfeo_dvec); // dyn_fun
for (int i = 0; i < N; i++)
{
size += 1*blasfeo_memsize_dmat(nu[i]+nx[i], nz[i]); // dzduxt
size += 1*blasfeo_memsize_dvec(nz[i]); // z_alg
size += 2*blasfeo_memsize_dvec(nv[i]); // cost_grad ineq_adj
size += 1*blasfeo_memsize_dvec(nu[i] + nx[i]); // dyn_adj
size += 1*blasfeo_memsize_dvec(nx[i + 1]); // dyn_fun
size += 1*blasfeo_memsize_dvec(2 * ni[i]); // ineq_fun
size += 1*blasfeo_memsize_dvec(nx[i] + nz[i]); // sim_guess
}
size += 1*blasfeo_memsize_dmat(nu[N]+nx[N], nz[N]); // dzduxt
size += 1*blasfeo_memsize_dvec(nz[N]); // z_alg
size += 2*blasfeo_memsize_dvec(nv[N]); // cost_grad ineq_adj
size += 1*blasfeo_memsize_dvec(nu[N] + nx[N]); // dyn_adj
size += 1*blasfeo_memsize_dvec(2 * ni[N]); // ineq_fun
size += 1*blasfeo_memsize_dvec(nx[N] + nz[N]); // sim_guess
size += 8; // initial align
size += 8; // middle align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
make_int_multiple_of(8, &size);
return size;
}
ocp_nlp_memory *ocp_nlp_memory_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, void *raw_memory)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nz = dims->nz;
int *nu = dims->nu;
int *ni = dims->ni;
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_memory *mem = (ocp_nlp_memory *) c_ptr;
c_ptr += sizeof(ocp_nlp_memory);
/* pointers to substructures */
// dynamics
mem->dynamics = (void **) c_ptr;
c_ptr += N*sizeof(void *);
// cost
mem->cost = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
// constraints
mem->constraints = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
// middle align
align_char_to(8, &c_ptr);
/* substructures */
// qp in
mem->qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// qp out
mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// QP solver
mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr);
c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize,
opts->regularize, c_ptr);
c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize,
opts->regularize);
// dynamics
for (int i = 0; i < N; i++)
{
mem->dynamics[i] = dynamics[i]->memory_assign(dynamics[i], dims->dynamics[i], opts->dynamics[i], c_ptr);
c_ptr += dynamics[i]->memory_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
}
// cost
for (int i = 0; i <= N; i++)
{
mem->cost[i] = cost[i]->memory_assign(cost[i], dims->cost[i], opts->cost[i], c_ptr);
c_ptr += cost[i]->memory_calculate_size(cost[i], dims->cost[i], opts->cost[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
mem->constraints[i] = constraints[i]->memory_assign(constraints[i],
dims->constraints[i], opts->constraints[i], c_ptr);
c_ptr += constraints[i]->memory_calculate_size( constraints[i], dims->constraints[i],
opts->constraints[i]);
}
// nlp res
mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr);
c_ptr += mem->nlp_res->memsize;
// blasfeo_struct align
align_char_to(8, &c_ptr);
// dzduxt
assign_and_advance_blasfeo_dmat_structs(N + 1, &mem->dzduxt, &c_ptr);
// z_alg
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->z_alg, &c_ptr);
// cost_grad
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->cost_grad, &c_ptr);
// ineq_fun
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_fun, &c_ptr);
// ineq_adj
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_adj, &c_ptr);
// dyn_fun
assign_and_advance_blasfeo_dvec_structs(N, &mem->dyn_fun, &c_ptr);
// dyn_adj
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->dyn_adj, &c_ptr);
// sim_guess
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->sim_guess, &c_ptr);
// set_sim_guess
assign_and_advance_bool(N+1, &mem->set_sim_guess, &c_ptr);
for (int i = 0; i <= N; ++i)
{
mem->set_sim_guess[i] = false;
}
// blasfeo_mem align
align_char_to(64, &c_ptr);
// dzduxt
for (int i=0; i<=N; i++)
{
assign_and_advance_blasfeo_dmat_mem(nu[i]+nx[i], nz[i], mem->dzduxt+i, &c_ptr);
}
// z_alg
for (int i=0; i<=N; i++)
{
blasfeo_create_dvec(nz[i], mem->z_alg+i, c_ptr);
c_ptr += blasfeo_memsize_dvec(nz[i]);
}
// cost_grad
for (int i = 0; i <= N; i++)
{
assign_and_advance_blasfeo_dvec_mem(nv[i], mem->cost_grad + i, &c_ptr);
}
// ineq_fun
for (int i = 0; i <= N; i++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[i], mem->ineq_fun + i, &c_ptr);
}
// ineq_adj
for (int i = 0; i <= N; i++)
{
assign_and_advance_blasfeo_dvec_mem(nv[i], mem->ineq_adj + i, &c_ptr);
}
// dyn_fun
for (int i = 0; i < N; i++)
{
assign_and_advance_blasfeo_dvec_mem(nx[i + 1], mem->dyn_fun + i, &c_ptr);
}
// dyn_adj
for (int i = 0; i <= N; i++)
{
assign_and_advance_blasfeo_dvec_mem(nu[i] + nx[i], mem->dyn_adj + i, &c_ptr);
}
// sim_guess
for (int i = 0; i <= N; i++)
{
assign_and_advance_blasfeo_dvec_mem(nx[i] + nz[i], mem->sim_guess + i, &c_ptr);
// initialize to 0
blasfeo_dvecse(nx[i] + nz[i], 0.0, mem->sim_guess+i, 0);
// printf("sim_guess i %d: %p\n", i, mem->sim_guess+i);
}
// printf("created memory %p\n", mem);
return mem;
}
/************************************************
* workspace
************************************************/
acados_size_t ocp_nlp_workspace_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
// int *nz = dims->nz;
acados_size_t size = 0;
// nlp
size += sizeof(ocp_nlp_workspace);
// tmp_nlp_out
size += ocp_nlp_out_calculate_size(config, dims);
// weight_merit_fun
size += ocp_nlp_out_calculate_size(config, dims);
// blasfeo_dvec
int nxu_max = 0;
int nx_max = 0;
int ni_max = 0;
for (int i = 0; i <= N; i++)
{
nx_max = nx_max > nx[i] ? nx_max : nx[i];
nxu_max = nxu_max > (nx[i]+nu[i]) ? nxu_max : (nx[i]+nu[i]);
ni_max = ni_max > ni[i] ? ni_max : ni[i];
}
size += 1 * blasfeo_memsize_dvec(nx_max);
size += 1 * blasfeo_memsize_dvec(nxu_max);
size += 1 * blasfeo_memsize_dvec(ni_max);
// array of pointers
// cost
size += (N+1)*sizeof(void *);
// dynamics
size += N*sizeof(void *);
// constraints
size += (N+1)*sizeof(void *);
// module workspace
if (opts->reuse_workspace)
{
#if defined(ACADOS_WITH_OPENMP)
// qp solver
size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (int i = 0; i < N; i++)
{
size += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
}
// cost
for (int i = 0; i <= N; i++)
{
size += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
size += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]);
}
#else
acados_size_t size_tmp = 0;
acados_size_t tmp;
// qp solver
tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
// dynamics
for (int i = 0; i < N; i++)
{
tmp = dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// cost
for (int i = 0; i <= N; i++)
{
tmp = cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// constraints
for (int i = 0; i <= N; i++)
{
tmp = constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
size += size_tmp;
#endif
}
else
{
// qp solver
size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (int i = 0; i < N; i++)
{
size += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
}
// cost
for (int i = 0; i <= N; i++)
{
size += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
size += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]);
}
}
size += 8; // struct align
return size;
}
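// Worked example of the reuse_workspace logic above (illustrative sizes):
// with per-module workspaces of 100 (qp solver), 80 (dynamics), 60 (cost)
// and 40 (constraints) bytes, the serial build reserves only the maximum,
// 100 bytes, shared by all modules in turn, while the OpenMP build reserves
// the sum 100 + 80 + 60 + 40 = 280 bytes so threads never share a workspace.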
ocp_nlp_workspace *ocp_nlp_workspace_assign(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_opts *opts, ocp_nlp_memory *mem, void *raw_memory)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
int *nx = dims->nx;
// int *nv = dims->nv;
int *nu = dims->nu;
int *ni = dims->ni;
// int *nz = dims->nz;
char *c_ptr = (char *) raw_memory;
ocp_nlp_workspace *work = (ocp_nlp_workspace *) c_ptr;
c_ptr += sizeof(ocp_nlp_workspace);
/* pointers to substructures */
// dynamics
work->dynamics = (void **) c_ptr;
c_ptr += N*sizeof(void *);
// cost
work->cost = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
// constraints
work->constraints = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
align_char_to(8, &c_ptr);
/* substructures */
// tmp_nlp_out
work->tmp_nlp_out = ocp_nlp_out_assign(config, dims, c_ptr);
c_ptr += ocp_nlp_out_calculate_size(config, dims);
// weight_merit_fun
work->weight_merit_fun = ocp_nlp_out_assign(config, dims, c_ptr);
c_ptr += ocp_nlp_out_calculate_size(config, dims);
// blasfeo_dvec
int nxu_max = 0;
int nx_max = 0;
int ni_max = 0;
for (int i = 0; i <= N; i++)
{
nx_max = nx_max > nx[i] ? nx_max : nx[i];
nxu_max = nxu_max > (nx[i]+nu[i]) ? nxu_max : (nx[i]+nu[i]);
ni_max = ni_max > ni[i] ? ni_max : ni[i];
}
assign_and_advance_blasfeo_dvec_mem(nxu_max, &work->tmp_nxu, &c_ptr);
assign_and_advance_blasfeo_dvec_mem(ni_max, &work->tmp_ni, &c_ptr);
assign_and_advance_blasfeo_dvec_mem(nx_max, &work->dxnext_dy, &c_ptr);
if (opts->reuse_workspace)
{
#if defined(ACADOS_WITH_OPENMP)
// qp solver
work->qp_work = (void *) c_ptr;
c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// dynamics
for (int i = 0; i < N; i++)
{
work->dynamics[i] = c_ptr;
c_ptr += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
}
// cost
for (int i = 0; i <= N; i++)
{
work->cost[i] = c_ptr;
c_ptr += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
work->constraints[i] = c_ptr;
c_ptr += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]);
}
#else
acados_size_t size_tmp = 0;
acados_size_t tmp;
// qp solver
work->qp_work = (void *) c_ptr;
tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
// dynamics
for (int i = 0; i < N; i++)
{
work->dynamics[i] = c_ptr;
tmp = dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// cost
for (int i = 0; i <= N; i++)
{
work->cost[i] = c_ptr;
tmp = cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// constraints
for (int i = 0; i <= N; i++)
{
work->constraints[i] = c_ptr;
tmp = constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
c_ptr += size_tmp;
#endif
}
else
{
// qp solver
work->qp_work = (void *) c_ptr;
c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (int i = 0; i < N; i++)
{
work->dynamics[i] = c_ptr;
c_ptr += dynamics[i]->workspace_calculate_size(dynamics[i], dims->dynamics[i], opts->dynamics[i]);
}
// cost
for (int i = 0; i <= N; i++)
{
work->cost[i] = c_ptr;
c_ptr += cost[i]->workspace_calculate_size(cost[i], dims->cost[i], opts->cost[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
work->constraints[i] = c_ptr;
c_ptr += constraints[i]->workspace_calculate_size(constraints[i], dims->constraints[i], opts->constraints[i]);
}
}
assert((char *) work + ocp_nlp_workspace_calculate_size(config, dims, opts) >= c_ptr);
return work;
}
/************************************************
* functions
************************************************/
void ocp_nlp_alias_memory_to_submodules(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in,
ocp_nlp_out *nlp_out, ocp_nlp_opts *opts, ocp_nlp_memory *nlp_mem, ocp_nlp_workspace *nlp_work)
{
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel
{ // beginning of parallel region
#endif
int N = dims->N;
// alias to dynamics_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
for (int i = 0; i < N; i++)
{
config->dynamics[i]->memory_set_ux_ptr(nlp_out->ux+i, nlp_mem->dynamics[i]);
config->dynamics[i]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+i, nlp_mem->dynamics[i]);
config->dynamics[i]->memory_set_ux1_ptr(nlp_out->ux+i+1, nlp_mem->dynamics[i]);
config->dynamics[i]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+i+1, nlp_mem->dynamics[i]);
config->dynamics[i]->memory_set_pi_ptr(nlp_out->pi+i, nlp_mem->dynamics[i]);
config->dynamics[i]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+i, nlp_mem->dynamics[i]);
config->dynamics[i]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+i, nlp_mem->dynamics[i]);
config->dynamics[i]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+i, nlp_mem->dynamics[i]);
config->dynamics[i]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+i, nlp_mem->dynamics[i]);
config->dynamics[i]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+i, nlp_mem->set_sim_guess+i, nlp_mem->dynamics[i]);
config->dynamics[i]->memory_set_z_alg_ptr(nlp_mem->z_alg+i, nlp_mem->dynamics[i]);
}
// alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
for (int i = 0; i <= N; i++)
{
config->cost[i]->memory_set_ux_ptr(nlp_out->ux+i, nlp_mem->cost[i]);
config->cost[i]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+i, nlp_mem->cost[i]);
config->cost[i]->memory_set_z_alg_ptr(nlp_mem->z_alg+i, nlp_mem->cost[i]);
config->cost[i]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+i, nlp_mem->cost[i]);
config->cost[i]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+i, nlp_mem->cost[i]);
config->cost[i]->memory_set_Z_ptr(nlp_mem->qp_in->Z+i, nlp_mem->cost[i]);
}
// alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
for (int i = 0; i <= N; i++)
{
config->constraints[i]->memory_set_ux_ptr(nlp_out->ux+i, nlp_mem->constraints[i]);
config->constraints[i]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+i, nlp_mem->constraints[i]);
config->constraints[i]->memory_set_lam_ptr(nlp_out->lam+i, nlp_mem->constraints[i]);
config->constraints[i]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+i, nlp_mem->constraints[i]);
config->constraints[i]->memory_set_z_alg_ptr(nlp_mem->z_alg+i, nlp_mem->constraints[i]);
config->constraints[i]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+i, nlp_mem->constraints[i]);
config->constraints[i]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+i, nlp_mem->constraints[i]);
config->constraints[i]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+i, nlp_mem->constraints[i]);
config->constraints[i]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[i], nlp_mem->constraints[i]);
config->constraints[i]->memory_set_idxs_rev_ptr(nlp_mem->qp_in->idxs_rev[i], nlp_mem->constraints[i]);
config->constraints[i]->memory_set_idxe_ptr(nlp_mem->qp_in->idxe[i], nlp_mem->constraints[i]);
}
// alias to regularize memory
config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);
// copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
// NOTE(oj): this will lead to an error for irk_gnsf, where T must be set in precompute;
// -> remove here and make sure precompute is called everywhere (e.g. in the Python interface).
for (int i = 0; i < N; i++)
{
config->dynamics[i]->model_set(config->dynamics[i], dims->dynamics[i],
nlp_in->dynamics[i], "T", nlp_in->Ts+i);
}
#if defined(ACADOS_WITH_OPENMP)
} // end of parallel region
#endif
return;
}
void ocp_nlp_initialize_submodules(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int N = dims->N;
// NOTE: initialize is called at the start of every NLP solver call.
// It computes quantities in the submodules that depend on data the user may change
// between subsequent solver calls, e.g. the factorization of the cost weight matrix.
// IN CONTRAST: precompute is called only once, after solver creation;
// it computes quantities that are not expected to change between subsequent solver calls.
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (int i = 0; i <= N; i++)
{
// cost
config->cost[i]->initialize(config->cost[i], dims->cost[i], in->cost[i],
opts->cost[i], mem->cost[i], work->cost[i]);
// dynamics
if (i < N)
config->dynamics[i]->initialize(config->dynamics[i], dims->dynamics[i],
in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
// constraints
config->constraints[i]->initialize(config->constraints[i], dims->constraints[i],
in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
}
return;
}
void ocp_nlp_initialize_t_slacks(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
struct blasfeo_dvec *ineq_fun;
int N = dims->N;
int *ni = dims->ni;
int *ns = dims->ns;
int *nx = dims->nx;
int *nu = dims->nu;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (int i = 0; i <= N; i++)
{
// copy out->ux to tmp_nlp_out->ux, since this is used in compute_fun
blasfeo_dveccp(nx[i]+nu[i]+2*ns[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
// evaluate inequalities
config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i],
in->constraints[i], opts->constraints[i],
mem->constraints[i], work->constraints[i]);
ineq_fun = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
// t = -ineq_fun
blasfeo_dveccpsc(2 * ni[i], -1.0, ineq_fun, 0, out->t + i, 0);
}
return;
}
void ocp_nlp_approximate_qp_matrices(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem,
ocp_nlp_workspace *work)
{
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
/* stage-wise multiple shooting lagrangian evaluation */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (int i = 0; i <= N; i++)
{
// init Hessian to 0
blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0);
if (i < N)
{
// Levenberg-Marquardt term: Ts[i] * levenberg_marquardt * eye()
if (opts->levenberg_marquardt > 0.0)
blasfeo_ddiare(nu[i] + nx[i], in->Ts[i] * opts->levenberg_marquardt,
mem->qp_in->RSQrq+i, 0, 0);
// dynamics
config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i],
in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
}
else
{
// Levenberg-Marquardt term: 1.0 * levenberg_marquardt * eye()
if (opts->levenberg_marquardt > 0.0)
blasfeo_ddiare(nu[i] + nx[i], opts->levenberg_marquardt,
mem->qp_in->RSQrq+i, 0, 0);
}
// cost
config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], in->cost[i],
opts->cost[i], mem->cost[i], work->cost[i]);
// constraints
config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i],
in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
}
/* collect stage-wise evaluations */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (int i=0; i <= N; i++)
{
// nlp mem: cost_grad
struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
blasfeo_dveccp(nv[i], cost_grad, 0, mem->cost_grad + i, 0);
// nlp mem: dyn_fun
if (i < N)
{
struct blasfeo_dvec *dyn_fun
= config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
blasfeo_dveccp(nx[i + 1], dyn_fun, 0, mem->dyn_fun + i, 0);
}
// nlp mem: dyn_adj
if (i < N)
{
struct blasfeo_dvec *dyn_adj
= config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, mem->dyn_adj + i, 0);
}
else
{
blasfeo_dvecse(nu[N] + nx[N], 0.0, mem->dyn_adj + N, 0);
}
if (i > 0)
{
struct blasfeo_dvec *dyn_adj
= config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], mem->dyn_adj+i, nu[i],
mem->dyn_adj+i, nu[i]);
}
// nlp mem: ineq_fun
struct blasfeo_dvec *ineq_fun =
config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
blasfeo_dveccp(2 * ni[i], ineq_fun, 0, mem->ineq_fun + i, 0);
// nlp mem: ineq_adj
struct blasfeo_dvec *ineq_adj =
config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
blasfeo_dveccp(nv[i], ineq_adj, 0, mem->ineq_adj + i, 0);
}
for (int i = 0; i <= N; i++)
{
// TODO(rien) where should the update happen??? move to qp update ???
// TODO(all): fix and move where appropriate
// if (i<N)
// {
// ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
// sim_opts *opts = dynamics_opts->sim_solver;
// if (opts->scheme != NULL && opts->scheme->type != exact)
// {
// for (int_t j = 0; j < nx; j++)
// BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
// for (int_t j = 0; j < nu; j++)
// BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j];
// }
// }
}
}
// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
void ocp_nlp_approximate_qp_vectors_sqp(ocp_nlp_config *config,
ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (int i = 0; i <= N; i++)
{
// g
blasfeo_dveccp(nv[i], mem->cost_grad + i, 0, mem->qp_in->rqz + i, 0);
// b
if (i < N)
blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, mem->qp_in->b + i, 0);
// d
blasfeo_dveccp(2 * ni[i], mem->ineq_fun + i, 0, mem->qp_in->d + i, 0);
}
}
void ocp_nlp_embed_initial_value(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int *ni = dims->ni;
// constraints
config->constraints[0]->bounds_update(config->constraints[0], dims->constraints[0],
in->constraints[0], opts->constraints[0], mem->constraints[0], work->constraints[0]);
// nlp mem: ineq_fun
struct blasfeo_dvec *ineq_fun =
config->constraints[0]->memory_get_fun_ptr(mem->constraints[0]);
blasfeo_dveccp(2 * ni[0], ineq_fun, 0, mem->ineq_fun, 0);
// d
blasfeo_dveccp(2 * ni[0], mem->ineq_fun, 0, mem->qp_in->d, 0);
}
double ocp_nlp_compute_merit_gradient(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
/* computes merit function gradient at iterate: out -- using already evaluated gradients of submodules
with weights: work->weight_merit_fun */
int i, j;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
double merit_grad = 0.0;
double weight;
// NOTE: step is in: mem->qp_out->ux
struct blasfeo_dvec *tmp_vec; // size nv
struct blasfeo_dvec tmp_vec_nxu = work->tmp_nxu; // size nxu
struct blasfeo_dvec dxnext_dy = work->dxnext_dy; // size nx
// cost
for (i=0; i<=N; i++)
{
tmp_vec = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
merit_grad += blasfeo_ddot(nv[i], tmp_vec, 0, mem->qp_out->ux + i, 0);
}
double merit_grad_cost = merit_grad;
/* dynamics */
double merit_grad_dyn = 0.0;
for (i=0; i<N; i++)
{
// get shooting node gap x_next(x_n, u_n) - x_{n+1};
tmp_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
/* compute directional derivative of xnext with direction y -> dxnext_dy */
blasfeo_dgemv_t(nx[i]+nu[i], nx[i+1], 1.0, mem->qp_in->BAbt+i, 0, 0, mem->qp_out->ux+i, 0,
0.0, &dxnext_dy, 0, &dxnext_dy, 0);
/* add merit gradient contributions depending on sign of shooting gap */
for (j = 0; j < nx[i+1]; j++)
{
weight = BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j);
double deqj_dy = BLASFEO_DVECEL(&dxnext_dy, j) - BLASFEO_DVECEL(mem->qp_out->ux+(i+1), nu[i+1]+j);
if (BLASFEO_DVECEL(tmp_vec, j) > 0)
{
merit_grad_dyn += weight * deqj_dy;
// printf("\ndyn_contribution +%e, weight %e, deqj_dy %e, i %d, j %d", weight * deqj_dy, weight, deqj_dy, i, j);
}
else
{
merit_grad_dyn -= weight * deqj_dy;
// printf("\ndyn_contribution %e, weight %e, deqj_dy %e, i %d, j %d", -weight * deqj_dy, weight, deqj_dy, i, j);
}
}
}
/* inequality contributions */
// NOTE: slack bound inequalities are not considered here.
// They should never be infeasible, unless explicitly initialized as infeasible from outside.
int constr_index, slack_index_in_ux, slack_index;
ocp_qp_dims* qp_dims = mem->qp_in->dim;
int *nb = qp_dims->nb;
int *ng = qp_dims->ng;
int *ns = qp_dims->ns;
double merit_grad_ineq = 0.0;
double slack_step;
for (i=0; i<=N; i++)
{
tmp_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
int *idxb = mem->qp_in->idxb[i];
if (ni[i] > 0)
{
// NOTE: loop could be simplified handling lower and upper constraints together.
for (j = 0; j < 2 * (nb[i] + ng[i]); j++) // 2 * ni
{
double constraint_val = BLASFEO_DVECEL(tmp_vec, j);
if (constraint_val > 0)
{
weight = BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j);
// find corresponding slack value
constr_index = j < nb[i]+ng[i] ? j : j-(nb[i]+ng[i]);
slack_index = mem->qp_in->idxs_rev[i][constr_index];
// if softened: add slack contribution
if (slack_index >= 0)
{
slack_index_in_ux = j < (nb[i]+ng[i]) ? nx[i] + nu[i] + slack_index
: nx[i] + nu[i] + slack_index + ns[i];
slack_step = BLASFEO_DVECEL(mem->qp_out->ux+i, slack_index_in_ux);
merit_grad_ineq -= weight * slack_step;
// printf("at node %d, ineq %d, idxs_rev[%d] = %d\n", i, j, constr_index, slack_index);
// printf("slack contribution: uxs[%d] = %e\n", slack_index_in_ux, slack_step);
}
// NOTE: the inequalities are internally organized in the following order:
// [ lbu lbx lg lh lphi ubu ubx ug uh uphi;
// lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi]
// printf("constraint %d %d is active with value %e", i, j, constraint_val);
if (j < nb[i])
{
// printf("lower idxb[%d] = %d dir %f, constraint_val %f, nb = %d\n", j, idxb[j], BLASFEO_DVECEL(mem->qp_out->ux, idxb[j]), constraint_val, nb[i]);
merit_grad_ineq += weight * BLASFEO_DVECEL(mem->qp_out->ux+i, idxb[j]);
}
else if (j < nb[i] + ng[i])
{
// merit_grad_ineq += weight * mem->qp_in->DCt_j * dux
blasfeo_dcolex(nx[i] + nu[i], mem->qp_in->DCt+i, j - nb[i], 0, &tmp_vec_nxu, 0);
merit_grad_ineq += weight * blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0, mem->qp_out->ux+i, 0);
// printf("general linear constraint lower contribution = %e, val = %e\n", blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0, mem->qp_out->ux+i, 0), constraint_val);
}
else if (j < 2*nb[i] + ng[i])
{
// printf("upper idxb[%d] = %d dir %f, constraint_val %f, nb = %d\n", j-nb[i]-ng[i], idxb[j-nb[i]-ng[i]], BLASFEO_DVECEL(mem->qp_out->ux, idxb[j-nb[i]-ng[i]]), constraint_val, nb[i]);
merit_grad_ineq += weight * BLASFEO_DVECEL(mem->qp_out->ux+i, idxb[j-nb[i]-ng[i]]);
}
else if (j < 2*nb[i] + 2*ng[i])
{
blasfeo_dcolex(nx[i] + nu[i], mem->qp_in->DCt+i, j - 2*nb[i] - ng[i], 0, &tmp_vec_nxu, 0);
merit_grad_ineq += weight * blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0, mem->qp_out->ux+i, 0);
// printf("general linear constraint upper contribution = %e, val = %e\n", blasfeo_ddot(nx[i] + nu[i], &tmp_vec_nxu, 0, mem->qp_out->ux+i, 0), constraint_val);
}
}
}
}
}
// print_ocp_qp_dims(qp_dims);
// print_ocp_qp_in(mem->qp_in);
merit_grad = merit_grad_cost + merit_grad_dyn + merit_grad_ineq;
if (opts->print_level > 1)
printf("computed merit_grad = %e, merit_grad_cost = %e, merit_grad_dyn = %e, merit_grad_ineq = %e\n", merit_grad, merit_grad_cost, merit_grad_dyn, merit_grad_ineq);
return merit_grad;
}
static double ocp_nlp_get_violation(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
// computes constraint violation infinity norm
// assumes constraint functions are evaluated before, e.g. done in ocp_nlp_evaluate_merit_fun
int i, j;
int N = dims->N;
int *nx = dims->nx;
int *ni = dims->ni;
struct blasfeo_dvec *tmp_fun_vec;
double violation = 0.0;
double tmp;
for (i=0; i<N; i++)
{
tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
for (j=0; j<nx[i+1]; j++)
{
tmp = fabs(BLASFEO_DVECEL(tmp_fun_vec, j));
violation = tmp > violation ? tmp : violation;
}
}
for (i=0; i<=N; i++)
{
tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
for (j=0; j<2*ni[i]; j++)
{
// NOTE: a positive value corresponds to a constraint violation
tmp = BLASFEO_DVECEL(tmp_fun_vec, j);
violation = tmp > violation ? tmp : violation;
}
}
return violation;
}
double ocp_nlp_evaluate_merit_fun(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
/* computes merit function value at iterate: tmp_nlp_out, with weights: work->weight_merit_fun */
//int j;
int N = dims->N;
int *nx = dims->nx;
int *ni = dims->ni;
double merit_fun = 0.0;
// compute fun value
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (int i=0; i<=N; i++)
{
// cost
config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i],
mem->cost[i], work->cost[i]);
}
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (int i=0; i<N; i++)
{
// dynamics
config->dynamics[i]->compute_fun(config->dynamics[i], dims->dynamics[i], in->dynamics[i],
opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
}
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (int i=0; i<=N; i++)
{
// constr
config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i],
in->constraints[i], opts->constraints[i],
mem->constraints[i], work->constraints[i]);
}
double *tmp_fun;
double tmp;
struct blasfeo_dvec *tmp_fun_vec;
double cost_fun = 0.0;
for(int i=0; i<=N; i++)
{
tmp_fun = config->cost[i]->memory_get_fun_ptr(mem->cost[i]);
cost_fun += *tmp_fun;
}
double dyn_fun = 0.0;
for(int i=0; i<N; i++)
{
tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
// printf("\nMerit: dyn will multiply tmp_fun, weights %d\n", i);
// blasfeo_print_exp_tran_dvec(nx[i+1], tmp_fun_vec, 0);
// blasfeo_print_exp_tran_dvec(nx[i+1], work->weight_merit_fun->pi+i, 0);
for(int j=0; j<nx[i+1]; j++)
{
// printf("\n%e %e\n", fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)), fabs(BLASFEO_DVECEL(tmp_fun_vec, j)));
dyn_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)) * fabs(BLASFEO_DVECEL(tmp_fun_vec, j));
}
}
double constr_fun = 0.0;
for(int i=0; i<=N; i++)
{
// printf("\ni %d\n", i);
tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
// blasfeo_print_exp_tran_dvec(2*ni[i], tmp_fun_vec, 0);
// blasfeo_print_exp_tran_dvec(2*ni[i], work->weight_merit_fun->lam+i, 0);
for (int j=0; j<2*ni[i]; j++)
{
tmp = BLASFEO_DVECEL(tmp_fun_vec, j);
if (tmp > 0.0)
{
// tmp = constraint violation
// printf("IN merit fun: ineq i %d, j %d tmp_fun %e, multiplier %e\n", i, j, tmp, BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
constr_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)) * tmp;
}
}
}
merit_fun = cost_fun + dyn_fun + constr_fun;
// printf("Merit fun: %e cost: %e dyn: %e constr: %e\n", merit_fun, cost_fun, dyn_fun, constr_fun);
return merit_fun;
}
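/* Illustrative sketch (not part of acados): the function above evaluates an
exact l1-type merit function of the generic form
phi = f(x) + sum_i |sigma_i| * |c_i(x)| + sum_j |tau_j| * max(0, g_j(x)),
with cost f, equality residuals c, inequalities g <= 0, and multiplier-based
weights sigma, tau. A minimal plain-array version (assuming <math.h> is
available; all argument names below are hypothetical): */
static double l1_merit_sketch(double cost,
const double *eq_res, const double *sigma, int n_eq,
const double *ineq_res, const double *tau, int n_ineq)
{
double phi = cost;
// weighted l1 norm of the equality (dynamics) residuals
for (int i = 0; i < n_eq; i++)
phi += fabs(sigma[i]) * fabs(eq_res[i]);
// only violated inequalities (positive residual) contribute
for (int j = 0; j < n_ineq; j++)
if (ineq_res[j] > 0.0)
phi += fabs(tau[j]) * ineq_res[j];
return phi;
}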
double ocp_nlp_line_search(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work,
int check_early_termination)
{
int i, j;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *ni = dims->ni;
double alpha = opts->step_length;
double tmp0, tmp1, merit_fun1;
ocp_qp_out *qp_out = mem->qp_out;
// Line search version Jonathan
// Following Leineweber1999, Section "3.5.1 Line Search Globalization"
// TODO: check out more advanced step search Leineweber1995
if (opts->globalization == MERIT_BACKTRACKING)
{
// copy out (current iterate) to work->tmp_nlp_out
for (i = 0; i <= N; i++)
blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
// for (i = 0; i < N; i++)
// blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0);
// for (i = 0; i <= N; i++)
// blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0);
// linear update of algebraic variables using state and input sensitivity
// if (i < N)
// {
// blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
// }
/* modify/initialize merit function weights (Leineweber1999 M5.1, p.89) */
if (mem->sqp_iter[0]==0)
{
// initialize weights
// equality merit weights = abs( eq multipliers of qp_sol )
for (i = 0; i < N; i++)
{
for (j=0; j<nx[i+1]; j++)
{
// tmp0 = fabs(BLASFEO_DVECEL(out->pi+i, j));
tmp0 = fabs(BLASFEO_DVECEL(qp_out->pi+i, j));
}
}
for (i = 0; i <= N; i++)
{
blasfeo_dveccp(2*ni[i], qp_out->lam+i, 0, work->weight_merit_fun->lam+i, 0);
}
}
else
{
// update weights
// printf("merit fun: update weights, sqp_iter = %d\n", mem->sqp_iter[0]);
for (i = 0; i < N; i++)
{
for(j=0; j<nx[i+1]; j++)
{
// abs(lambda) (LW)
tmp0 = fabs(BLASFEO_DVECEL(qp_out->pi+i, j));
// .5 * (abs(lambda) + sigma)
tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j));
BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0 > tmp1 ? tmp0 : tmp1;
}
}
for (i = 0; i <= N; i++)
{
for(j=0; j<2*ni[i]; j++)
{
// mu (LW)
tmp0 = BLASFEO_DVECEL(qp_out->lam+i, j);
// .5 * (mu + tau)
tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
}
}
}
if (1) // (mem->sqp_iter[0]!=0) // TODO: why does Leineweber do full step in first SQP iter?
{
double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
double reduction_factor = opts->alpha_reduction;
double max_next_merit_fun_val = merit_fun0;
double eps_sufficient_descent = opts->eps_sufficient_descent;
double dmerit_dy = 0.0;
alpha = 1.0;
// to avoid armijo evaluation and loop when checking if SOC should be done
if (check_early_termination)
{
// TMP:
// printf("tmp: merit_grad eval in early termination\n");
// dmerit_dy = ocp_nlp_compute_merit_gradient(config, dims, in, out, opts, mem, work);
// TODO(oj): should the merit weight update be undone in case of early termination?
double violation_current = ocp_nlp_get_violation(config, dims, in, out, opts, mem, work);
// tmp_nlp_out = out + alpha * qp_out
for (i = 0; i <= N; i++)
blasfeo_daxpy(nv[i], alpha, qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
double violation_step = ocp_nlp_get_violation(config, dims, in, out, opts, mem, work);
if (opts->print_level > 0)
{
printf("\npreliminary line_search: merit0 %e, merit1 %e; viol_current %e, viol_step %e\n", merit_fun0, merit_fun1, violation_current, violation_step);
}
if (merit_fun1 < merit_fun0 && violation_step < violation_current)
{
// full step if merit and constraint violation improves
// TODO: check armijo in this case?
return alpha;
}
else // this implies SOC will be done
{
return reduction_factor * reduction_factor;
}
}
/* actual Line Search*/
if (opts->line_search_use_sufficient_descent)
{
// check Armijo-type sufficient descent condition, Leineweber1999 (2.35);
dmerit_dy = ocp_nlp_compute_merit_gradient(config, dims, in, out, opts, mem, work);
if (dmerit_dy > 0.0)
{
if (dmerit_dy > 1e-6 && opts->print_level > 0)
{
printf("\nacados line search: found dmerit_dy = %e > 0. Setting it to 0.0 instead\n", dmerit_dy);
}
dmerit_dy = 0.0;
}
}
// From Leineweber1999: eq (3.64) -> only relevant for adaptive integrators looking at Remark 3.2.
// "It is noteworthy that our practical implementation takes into account the potential nonsmoothness introduced by the fact that certain components of the penalty function - namely the continuity condition residuals - are evaluated only within integration tolerance."
// double sum_pi = 0.0;
// for (i = 0; i < N; i++)
// {
// for (j = 0; j < dims->nx[i+1]; j++)
// sum_pi += BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j);
// }
// double relaxed_val = 2.0 * 1e-6 * sum_pi;
// if (abs(merit_fun0 - merit_fun1) < relaxed_val)
// {
// printf("\nexiting because of relaxed_val.");
// break;
// }
for (j=0; alpha*reduction_factor > opts->alpha_min; j++)
{
// tmp_nlp_out = out + alpha * qp_out
for (i = 0; i <= N; i++)
blasfeo_daxpy(nv[i], alpha, qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
if (opts->print_level > 1)
{
printf("backtracking %d, merit_fun1 = %e, merit_fun0 %e\n", j, merit_fun1, merit_fun0);
}
// if (merit_fun1 < merit_fun0 && merit_fun1 > max_next_merit_fun_val)
// {
// printf("\nalpha %f would be accepted without sufficient descent condition", alpha);
// }
max_next_merit_fun_val = merit_fun0 + eps_sufficient_descent * dmerit_dy * alpha;
if (merit_fun1 < max_next_merit_fun_val)
{
break;
}
else
{
alpha *= reduction_factor;
}
}
}
}
return alpha;
}
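/* Illustrative sketch (not the acados API): stripped of the merit-weight
update and the early-termination branch, the backtracking above is a
classic Armijo loop: start from alpha = 1 and shrink by a fixed factor
until sufficient descent holds or alpha reaches its lower bound. The
callback and all names below are hypothetical. */
static double armijo_backtracking_sketch(
double (*merit_at)(double alpha, void *data), void *data,
double merit0, double dmerit_dy, // merit value and directional derivative at alpha = 0
double reduction_factor, double eps, double alpha_min)
{
double alpha = 1.0;
while (alpha * reduction_factor > alpha_min)
{
// accept alpha on sufficient descent (dmerit_dy <= 0 by construction)
if (merit_at(alpha, data) < merit0 + eps * dmerit_dy * alpha)
return alpha;
alpha *= reduction_factor;
}
return alpha;
}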
void ocp_nlp_update_variables_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work, double alpha)
{
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (int i = 0; i <= N; i++)
{
// step in primal variables
blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux + i, 0, out->ux + i, 0, out->ux + i, 0);
// update dual variables
if (opts->full_step_dual)
{
blasfeo_dveccp(2*ni[i], mem->qp_out->lam+i, 0, out->lam+i, 0);
if (i < N)
{
blasfeo_dveccp(nx[i+1], mem->qp_out->pi+i, 0, out->pi+i, 0);
}
}
else
{
// update duals with alpha step
blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->lam+i, 0);
blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->lam+i, 0, out->lam+i, 0, out->lam+i, 0);
if (i < N)
{
blasfeo_dvecsc(nx[i+1], 1.0-alpha, out->pi+i, 0);
blasfeo_daxpy(nx[i+1], alpha, mem->qp_out->pi+i, 0, out->pi+i, 0, out->pi+i, 0);
}
}
// update slack values
blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->t+i, 0);
blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->t+i, 0, out->t+i, 0, out->t+i, 0);
// linear update of algebraic variables using state and input sensitivity
if (i < N)
{
blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0,
mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
}
}
}
/************************************************
* residuals
************************************************/
acados_size_t ocp_nlp_res_calculate_size(ocp_nlp_dims *dims)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
acados_size_t size = sizeof(ocp_nlp_res);
size += 3 * (N + 1) * sizeof(struct blasfeo_dvec); // res_stat res_ineq res_comp
size += 1 * N * sizeof(struct blasfeo_dvec); // res_eq
for (int i = 0; i < N; i++)
{
size += 1 * blasfeo_memsize_dvec(nv[i]); // res_stat
size += 1 * blasfeo_memsize_dvec(nx[i + 1]); // res_eq
size += 2 * blasfeo_memsize_dvec(2 * ni[i]); // res_ineq res_comp
}
size += 1 * blasfeo_memsize_dvec(nv[N]); // res_stat
size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // res_ineq res_comp
size += 1 * blasfeo_memsize_dvec(N + 1); // tmp (one entry per stage, element N included)
size += 8; // initial align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
make_int_multiple_of(8, &size);
return size;
}
ocp_nlp_res *ocp_nlp_res_assign(ocp_nlp_dims *dims, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_res *res = (ocp_nlp_res *) c_ptr;
c_ptr += sizeof(ocp_nlp_res);
// blasfeo_struct align
align_char_to(8, &c_ptr);
// res_stat
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_stat, &c_ptr);
// res_eq
assign_and_advance_blasfeo_dvec_structs(N, &res->res_eq, &c_ptr);
// res_ineq
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_ineq, &c_ptr);
// res_comp
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_comp, &c_ptr);
// blasfeo_mem align
align_char_to(64, &c_ptr);
// res_stat
for (int i = 0; i <= N; i++)
{
assign_and_advance_blasfeo_dvec_mem(nv[i], res->res_stat + i, &c_ptr);
}
// res_eq
for (int i = 0; i < N; i++)
{
assign_and_advance_blasfeo_dvec_mem(nx[i + 1], res->res_eq + i, &c_ptr);
}
// res_ineq
for (int i = 0; i <= N; i++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[i], res->res_ineq + i, &c_ptr);
}
// res_comp
for (int i = 0; i <= N; i++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[i], res->res_comp + i, &c_ptr);
}
assign_and_advance_blasfeo_dvec_mem(N + 1, &res->tmp, &c_ptr);
res->memsize = ocp_nlp_res_calculate_size(dims);
assert((char *) raw_memory + res->memsize >= c_ptr);
return res;
}
void ocp_nlp_res_compute(ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_res *res,
ocp_nlp_memory *mem)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
double tmp_res;
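// Four residuals, each reduced stage-wise to an infinity norm:
// res_stat = cost_grad - ineq_adj - dyn_adj (stationarity)
// res_eq = dyn_fun (dynamics/continuity)
// res_ineq = ineq_fun + t (inequality feasibility)
// res_comp = lam .* t (complementarity)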
// res_stat
for (int i = 0; i <= N; i++)
{
blasfeo_daxpy(nv[i], -1.0, mem->ineq_adj + i, 0, mem->cost_grad + i, 0, res->res_stat + i,
0);
blasfeo_daxpy(nu[i] + nx[i], -1.0, mem->dyn_adj + i, 0, res->res_stat + i, 0,
res->res_stat + i, 0);
blasfeo_dvecnrm_inf(nv[i], res->res_stat + i, 0, &tmp_res);
blasfeo_dvecse(1, tmp_res, &res->tmp, i);
}
blasfeo_dvecnrm_inf(N+1, &res->tmp, 0, &res->inf_norm_res_stat);
// res_eq
for (int i = 0; i < N; i++)
{
blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, res->res_eq + i, 0);
blasfeo_dvecnrm_inf(nx[i + 1], res->res_eq + i, 0, &tmp_res);
blasfeo_dvecse(1, tmp_res, &res->tmp, i);
}
blasfeo_dvecnrm_inf(N, &res->tmp, 0, &res->inf_norm_res_eq);
// res_ineq
res->inf_norm_res_ineq = 0.0;
for (int i = 0; i <= N; i++)
{
blasfeo_daxpy(2 * ni[i], 1.0, out->t + i, 0, mem->ineq_fun + i, 0, res->res_ineq + i, 0);
blasfeo_dvecnrm_inf(2 * ni[i], res->res_ineq + i, 0, &tmp_res);
blasfeo_dvecse(1, tmp_res, &res->tmp, i);
}
blasfeo_dvecnrm_inf(N+1, &res->tmp, 0, &res->inf_norm_res_ineq);
// res_comp
res->inf_norm_res_comp = 0.0;
for (int i = 0; i <= N; i++)
{
blasfeo_dvecmul(2 * ni[i], out->lam + i, 0, out->t + i, 0, res->res_comp + i, 0);
blasfeo_dvecnrm_inf(2 * ni[i], res->res_comp + i, 0, &tmp_res);
blasfeo_dvecse(1, tmp_res, &res->tmp, i);
}
blasfeo_dvecnrm_inf(N+1, &res->tmp, 0, &res->inf_norm_res_comp);
// printf("computed residuals stat: %e, eq: %e, ineq: %e, comp: %e\n", res->inf_norm_res_stat, res->inf_norm_res_eq,
// res->inf_norm_res_ineq, res->inf_norm_res_comp);
}
void ocp_nlp_res_get_inf_norm(ocp_nlp_res *res, double *out)
{
double norm = res->inf_norm_res_stat;
norm = (res->inf_norm_res_eq > norm) ? res->inf_norm_res_eq : norm;
norm = (res->inf_norm_res_ineq > norm) ? res->inf_norm_res_ineq : norm;
norm = (res->inf_norm_res_comp > norm) ? res->inf_norm_res_comp : norm;
*out = norm;
return;
}
void ocp_nlp_cost_compute(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
// extract dims
int N = dims->N;
double* tmp_cost = NULL;
double total_cost = 0.0;
for (int i = 0; i <= N; i++)
{
// set pointers
// NOTE(oj): the cost compute function takes the tmp_ux_ptr as input,
// since it is also used for globalization,
// especially with primal variables that are NOT current SQP iterates.
config->cost[i]->memory_set_tmp_ux_ptr(out->ux+i, mem->cost[i]);
config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i],
opts->cost[i], mem->cost[i], work->cost[i]);
tmp_cost = config->cost[i]->memory_get_fun_ptr(mem->cost[i]);
// printf("cost at stage %d = %e, total = %e\n", i, *tmp_cost, total_cost);
total_cost += *tmp_cost;
}
mem->cost_value = total_cost;
// printf("\ncomputed total cost: %e\n", total_cost);
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
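/* Reference-only sketch: an untiled version of the kernel that the generated
CLooG loop nest in main() computes, using the same two-buffer layout
A[2][Nz][Ny][Nx] and the same update, out = alpha * center + beta * (sum of
the 6 face neighbors). alpha and beta correspond to the constants defined
inside main(). */
static void stencil_7pt_reference(double ****A, int Nt, int Nz, int Ny, int Nx,
double alpha, double beta)
{
for (int t = 0; t < Nt - 1; t++)
for (int i = 1; i < Nz - 1; i++)
for (int j = 1; j < Ny - 1; j++)
for (int k = 1; k < Nx - 1; k++)
A[(t + 1) % 2][i][j][k] = alpha * A[t % 2][i][j][k]
+ beta * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k]
+ A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k]
+ A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]);
}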
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int i, j, k, test;
int Nx, Ny, Nz, Nt;
// all three grid sizes and the time-step count are required
if (argc < 5) {
fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, with a trailing -1 sentinel marking the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 16;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
// initialize the full grid, including boundary layers, in both buffers;
// the stencil reads the boundaries of A[1] but never writes them
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = A[0][i][j][k];
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// stencil kernel: 6 additions and 2 multiplications per grid point
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(32*t2-Nz-12,16)),t1);t3<=min(min(min(floord(Nt+Ny-4,16),floord(16*t1+Ny+29,16)),floord(32*t2+Ny+28,16)),floord(32*t1-32*t2+Nz+Ny+27,16));t3++) {
for (t4=max(max(max(0,ceild(t1-127,128)),ceild(32*t2-Nz-2044,2048)),ceild(16*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(16*t1+Nx+29,2048)),floord(32*t2+Nx+28,2048)),floord(16*t3+Nx+12,2048)),floord(32*t1-32*t2+Nz+Nx+27,2048));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),16*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),16*t3+14),2048*t4+2046),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (commented out: freeing here caused performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_unaryop__abs_fp64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_int64
// op(A') function: GB_tran__abs_fp64_int64
// C type: double
// A type: int64_t
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
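// For this operator, GB_CAST_OP (pC, pA) therefore expands to:
// { int64_t aij = Ax [pA] ; double x = (double) aij ; Cx [pC] = fabs (x) ; }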
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_fp64_int64
(
double *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_fp64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cross_correlate_2d.c | // MIT License
//
// Copyright (c) 2021 Florian
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <stdio.h>
#include <stdlib.h>
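// Hypothetical usage sketch (not part of the original file): a "valid"
// cross-correlation of a 3x3 input with a 2x2 kernel gives a 2x2 output,
// i.e. s_o[d] = s_i[d] - s_k[d] + 1. The forward declaration lets the
// sketch precede the definition below.
void cross_correlate_2d(
const size_t *s_i, const double *input,
const size_t *s_k, const double *kernel,
const size_t *s_o, double *output);

static void example_usage(void)
{
const size_t s_i[2] = {3, 3}, s_k[2] = {2, 2}, s_o[2] = {2, 2};
const double input[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
const double kernel[4] = {1, 0, 0, 1}; // picks input[i][j] + input[i+1][j+1]
double output[4];
cross_correlate_2d(s_i, input, s_k, kernel, s_o, output);
for (int i = 0; i < 4; ++i)
printf("%g\n", output[i]); // expected: 6 8 12 14
}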
void cross_correlate_2d(
const size_t *s_i, const double *input,
const size_t *s_k, const double *kernel,
const size_t *s_o, double *output)
{
#pragma omp parallel for
for (int i = 0; i < (int) s_o[0]; ++i)
{
for (size_t j = 0; j < s_o[1]; ++j)
{
output[i * s_o[1] + j] = 0;
for (size_t k = 0; k < s_k[0]; ++k)
{
for (size_t l = 0; l < s_k[1]; ++l)
{
output[i * s_o[1] + j] +=
input[(i + k) * s_i[1] + j + l] *
kernel[k * s_k[1] + l];
}
}
}
}
} |